diff --git a/.gitmodules b/.gitmodules index a9b5ffbe..7a602fab 100644 --- a/.gitmodules +++ b/.gitmodules @@ -25,3 +25,6 @@ [submodule "envoy-data-plane-api/protobuf"] path = envoy-data-plane-api/protobuf url = https://github.com/protocolbuffers/protobuf.git +[submodule "envoy-data-plane-api/udpa"] + path = envoy-data-plane-api/udpa + url = https://github.com/cncf/udpa.git diff --git a/Cargo.lock b/Cargo.lock index 26d29ec8..be7fc037 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -66,12 +66,6 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -139,9 +133,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.99" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arc-swap" @@ -273,9 +267,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.13.3" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c953fe1ba023e6b7730c0d4b031d06f267f23a46167dcbd40316644b10a17ba" +checksum = "94b8ff6c09cd57b16da53641caa860168b88c172a5ee163b0288d3d6eea12786" dependencies = [ "aws-lc-sys", "zeroize", @@ -283,9 +277,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbfd150b5dbdb988bcc8fb1fe787eb6b7ee6180ca24da683b61ea5405f3d43ff" +checksum = 
"0e44d16778acaf6a9ec9899b92cebd65580b83f685446bf2e1f5d3d732f99dcd" dependencies = [ "bindgen", "cc", @@ -464,32 +458,29 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.5" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ "bitflags", "cexpr", "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", + "itertools 0.13.0", "log", "prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 2.1.1", "shlex", "syn", - "which", ] [[package]] name = "bitflags" -version = "2.9.3" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" dependencies = [ "serde", ] @@ -526,9 +517,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c8f83209414aacf0eeae3cf730b18d6981697fba62f200fcfb92b9f082acba" +checksum = "f5c434ae3cf0089ca203e9019ebe529c47ff45cefe8af7c85ecb734ef541822f" [[package]] name = "caps" @@ -557,10 +548,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.34" +version = "1.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc" +checksum = "80f41ae168f955c12fb8960b057d70d0ca153fb83182b57d86380443527be7e9" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -595,16 +587,15 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-link", + "windows-link 0.2.0", ] [[package]] @@ -647,9 +638,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.46" +version = "4.5.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c5e4fcf9c21d2e544ca1ee9d8552de13019a42aa7dbf32747fa7aaf1df76e57" +checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae" dependencies = [ "clap_builder", "clap_derive", @@ -657,9 +648,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.46" +version = "4.5.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fecb53a0e6fcfb055f686001bc2e2592fa527efaf38dbe81a6a9563562e57d41" +checksum = "c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9" dependencies = [ "anstream", "anstyle", @@ -669,9 +660,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.45" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6" +checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" dependencies = [ "heck", "proc-macro2", @@ -873,9 +864,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "d630bccd429a5bb5a64b5e94f693bfc48c9f8566418fda4c494cc94f911f87cc" dependencies = [ "powerfmt", ] @@ -890,7 +881,7 @@ dependencies = [ "lazy_static", "mintex", "parking_lot", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde_json", "thousands", @@ 
-973,12 +964,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -1005,6 +996,12 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "find-msvc-tools" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" + [[package]] name = "fixedbitset" version = "0.5.7" @@ -1141,7 +1138,7 @@ dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.3+wasi-0.2.4", + "wasi 0.14.7+wasi-0.2.4", ] [[package]] @@ -1168,7 +1165,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.11.0", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -1203,9 +1200,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.5" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" [[package]] name = "heck" @@ -1270,15 +1267,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "home" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" -dependencies = [ - "windows-sys 0.59.0", -] - [[package]] name = "http" version = "1.3.1" @@ -1337,9 +1325,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" 
[[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "humantime-serde" @@ -1431,11 +1419,36 @@ dependencies = [ "windows-registry", ] +[[package]] +name = "hyper_unix_listener" +version = "0.1.0" +dependencies = [ + "clap", + "hyper", + "hyperlocal", + "tokio", +] + +[[package]] +name = "hyperlocal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1562,6 +1575,16 @@ dependencies = [ "icu_properties", ] +[[package]] +name = "if-addrs" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf39cc0423ee66021dc5eccface85580e4a001e0c5288bae8bea7ecb69225e90" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -1574,12 +1597,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", ] 
[[package]] @@ -1661,9 +1684,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] @@ -1715,9 +1738,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e" dependencies = [ "once_cell", "wasm-bindgen", @@ -1740,12 +1763,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.175" @@ -1754,12 +1771,12 @@ checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "libloading" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.53.3", + "windows-link 0.2.0", ] [[package]] @@ -1776,9 +1793,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = 
"df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" @@ -1798,9 +1815,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "lru-cache" @@ -1813,11 +1830,11 @@ dependencies = [ [[package]] name = "matchers" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ - "regex-automata 0.1.10", + "regex-automata", ] [[package]] @@ -1910,12 +1927,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "overload", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -2226,6 +2242,8 @@ dependencies = [ "hyper", "hyper-rustls", "hyper-util", + "hyperlocal", + "if-addrs", "ipnet", "once_cell", "opentelemetry", @@ -2245,7 +2263,7 @@ dependencies = [ "pretty-duration", "rand 0.8.5", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "rustls", "rustls-pemfile", "rustls-platform-verifier", @@ -2254,6 +2272,7 @@ dependencies = [ "serde", "serde_yaml", "smol_str", + "socket2 0.6.0", "thiserror 1.0.69", "thread-id", "thread_local", @@ -2370,6 +2389,7 @@ dependencies = [ "bytes", "futures", "http", + "hyper-util", "orion-configuration", "orion-data-plane-api", "orion-error", @@ -2384,12 +2404,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "overload" -version = "0.1.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "parking_lot" version = "0.12.4" @@ -2432,7 +2446,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.11.0", + "indexmap 2.11.4", ] [[package]] @@ -2517,9 +2531,9 @@ checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] @@ -2824,17 +2838,8 @@ checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.10", - "regex-syntax 0.8.6", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", + "regex-automata", + "regex-syntax", ] [[package]] @@ -2845,15 +2850,9 @@ checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.6", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.6" @@ -2905,9 +2904,9 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" +checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "ring" @@ -2950,6 +2949,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + [[package]] name = "rusticata-macros" version = "4.1.0" @@ -2974,28 +2979,28 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ "bitflags", "errno", "libc", - "linux-raw-sys 0.9.4", - "windows-sys 0.60.2", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.0", ] [[package]] name = "rustls" -version = "0.23.31" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "aws-lc-rs", "log", "once_cell", "rustls-pki-types", - "rustls-webpki 0.103.4", + "rustls-webpki 0.103.6", "subtle", "zeroize", ] @@ -3022,7 +3027,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.3.0", + "security-framework 3.5.0", ] [[package]] @@ -3083,9 +3088,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = 
"8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ "aws-lc-rs", "ring", @@ -3116,11 +3121,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.0", ] [[package]] @@ -3145,9 +3150,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" +checksum = "cc198e42d9b7510827939c9a15f5062a0c913f3371d765977e586d2fe6c16f4a" dependencies = [ "bitflags", "core-foundation 0.10.1", @@ -3158,9 +3163,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -3168,10 +3173,11 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" dependencies = [ + "serde_core", "serde_derive", ] @@ -3185,11 +3191,20 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_core" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +dependencies = [ + "serde_derive", +] + [[package]] 
name = "serde_derive" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" dependencies = [ "proc-macro2", "quote", @@ -3198,24 +3213,26 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.143" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" dependencies = [ "itoa", "serde", + "serde_core", ] [[package]] @@ -3246,7 +3263,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.11.4", "itoa", "ryu", "serde", @@ -3397,15 +3414,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.21.0" +version = "3.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e" +checksum = "84fa4d11fadde498443cca10fd3ac23c951f0dc59e080e9f4b93d4df4e4eea53" dependencies = [ "fastrand 2.3.0", "getrandom 0.3.3", "once_cell", - "rustix 1.0.8", - "windows-sys 0.60.2", + "rustix 1.1.2", + "windows-sys 0.61.0", ] [[package]] @@ -3506,9 +3523,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.44" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -3521,15 +3538,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -3603,9 +3620,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "05f63835928ca123f1bef57abbcd23bb2ba0ac9ae1235f1e65bda0d06e7786bd" dependencies = [ "rustls", "tokio", @@ -3828,14 +3845,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "regex", + "regex-automata", "sharded-slab", "smallvec", "thread_local", @@ -3904,9 +3921,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unsafe-libyaml" @@ -3946,9 +3963,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.3", "js-sys", @@ -3994,30 +4011,40 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.14.3+wasi-0.2.4" +version = "0.14.7+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" +dependencies = [ + "wasip2", +] + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = 
"0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c" dependencies = [ "bumpalo", "log", @@ -4029,9 +4056,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "a0b221ff421256839509adbb55998214a70d829d3a28c69b4a6672e9d2a42f67" dependencies = [ "cfg-if", "js-sys", @@ -4042,9 +4069,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4052,9 +4079,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32" dependencies = [ "proc-macro2", "quote", @@ -4065,18 +4092,18 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "fbe734895e869dc429d78c4b433f8d17d95f8d05317440b4fad5ab2d33e596dc" dependencies = [ "js-sys", "wasm-bindgen", @@ 
-4100,18 +4127,6 @@ dependencies = [ "rustls-pki-types", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.44", -] - [[package]] name = "widestring" version = "1.2.0" @@ -4136,11 +4151,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -4151,15 +4166,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.2" +version = "0.62.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +checksum = "57fe7168f7de578d2d8a05b07fd61870d2e73b4020e9f49aa00da8471723497c" dependencies = [ "windows-implement", "windows-interface", - "windows-link", - "windows-result", - "windows-strings", + "windows-link 0.2.0", + "windows-result 0.4.0", + "windows-strings 0.5.0", ] [[package]] @@ -4190,15 +4205,21 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" +[[package]] +name = "windows-link" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" + [[package]] name = "windows-registry" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ - "windows-link", - "windows-result", - "windows-strings", + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", ] [[package]] @@ -4207,7 +4228,16 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ - "windows-link", + "windows-link 0.1.3", +] + +[[package]] +name = "windows-result" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" +dependencies = [ + "windows-link 0.2.0", ] [[package]] @@ -4216,7 +4246,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ - "windows-link", + "windows-link 0.1.3", +] + +[[package]] +name = "windows-strings" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" +dependencies = [ + "windows-link 0.2.0", ] [[package]] @@ -4255,6 +4294,15 @@ dependencies = [ "windows-targets 0.53.3", ] +[[package]] +name = "windows-sys" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" +dependencies = [ + "windows-link 0.2.0", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -4292,7 +4340,7 @@ version = "0.53.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" dependencies = [ - "windows-link", + "windows-link 0.1.3", "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", @@ -4453,9 +4501,9 @@ dependencies = [ 
[[package]] name = "wit-bindgen" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052283831dbae3d879dc7f51f3d92703a316ca49f91540417d38591826127814" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" @@ -4512,18 +4560,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index b2011252..e2ac9004 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ members = [ "orion-metrics", "orion-proxy", "orion-tracing", - "orion-xds", + "orion-xds", "tools/hyper_unix_listener", ] resolver = "2" diff --git a/NOTES.txt b/NOTES.txt new file mode 100644 index 00000000..66261a61 --- /dev/null +++ b/NOTES.txt @@ -0,0 +1,174 @@ +Istio Proxy/Envoy CMD params +/usr/local/bin/envoy -c etc/istio/proxy/envoy-rev.json --drain-time-s 45 --drain-strategy immediate --local-address-ip-version v4 --file-flush-interval-msec 1000 --disable-hot-restart --allow-unknown-static-fields -l warning --component-log-level misc:error --skip-deprecated-logs --concurrency 2 + +k exec -ti details-v1-766844796b-fg5fr -c istio-proxy -- /bin/bash + +/pkg/config/constants/constants.go -- name of the binary +/pkg/envoy/proxy.go -- params passed to orion + +make docker.proxyv2 + +docker tag localhost:5000/pilot:latest 
istio/pilot:1.28-dev +docker tag localhost:5000/proxyv2:latest istio/proxyv2:1.28-dev + +kind load docker-image istio/proxyv2:1.28-dev --name envoy-gateway +kind load docker-image istio/pilot:1.28-dev --name envoy-gateway + +k logs -c istio-proxy details-v1-766844796b-4sdwd + + +Orion location for building docker images +cp ../kmesh-orion/orion-proxy/conf/orion-conf.yaml ./out/linux_amd64/dockerx_build/build.docker.proxyv2/amd64/ +cp ../kmesh-orion/orion-proxy/conf/orion-conf.yaml ./out/linux_amd64/dockerx_build/build.docker.proxyv2/ +cp ../kmesh-orion/orion-proxy/conf/orion-conf.yaml ./out/linux_amd64/release/ +cp ../kmesh-orion/orion-proxy/conf/orion-conf.yaml ./out/linux_amd64/ + + +cp ../kmesh-orion/target/debug/orion ./out/linux_amd64/dockerx_build/build.docker.proxyv2/amd64/ +cp ../kmesh-orion/target/debug/orion ./out/linux_amd64/release/ +cp ../kmesh-orion/target/debug/orion ./out/linux_amd64/ + + +./out/linux_amd64/dockerx_build/build.docker.proxyv2/amd64/orion --config ../kmesh-orion/orion-proxy/conf/orion-runtime.yaml --with-envoy-bootstrap /home/dawid/Workspace/istio-orion-demo/envoy-rev.json + + + +Change UnixClient::unix(), it selects Http1 by default + #[must_use] + fn unix() -> Client + where + B::Data: Send, + { + Client::builder(TokioExecutor::new()).build(UnixConnector) + } + + + + + +/usr/local/bin/pilot-agent proxy sidecar --domain default.svc.cluster.local --proxyLogLevel=warning --proxyComponentLogLevel=misc:error --log_output_level=default:info +./out/linux_amd64/pilot-agent proxy sidecar --domain default.svc.cluster.local --proxyLogLevel=debug --proxyComponentLogLevel=all:debug --log_output_level=all:debug + +/etc/istio/proxy/grpc-bootstrap.json +{ + "xds_servers": [ + { + "server_uri": "unix:///etc/istio/proxy/XDS", + "channel_creds": [ + { + "type": "insecure" + } + ], + "server_features": [ + "xds_v3" + ] + } + ], + "node": { + "id": "sidecar~10.244.0.57~details-v1-766844796b-gz9gr.default~default.svc.cluster.local", + "metadata": { + 
"ANNOTATIONS": { + "istio.io/rev": "default", + "kubectl.kubernetes.io/default-container": "details", + "kubectl.kubernetes.io/default-logs-container": "details", + "kubernetes.io/config.seen": "2025-09-04T09:32:25.872044874Z", + "kubernetes.io/config.source": "api", + "prometheus.io/path": "/stats/prometheus", + "prometheus.io/port": "15020", + "prometheus.io/scrape": "true", + "sidecar.istio.io/status": "{\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"workload-socket\",\"credential-socket\",\"workload-certs\",\"istio-envoy\",\"istio-data\",\"istio-podinfo\",\"istio-token\" +,\"istiod-ca-cert\",\"istio-ca-crl\"],\"imagePullSecrets\":null,\"revision\":\"default\"}" + }, + "APP_CONTAINERS": "details", + "CLUSTER_ID": "Kubernetes", + "ENVOY_PROMETHEUS_PORT": 15090, + "ENVOY_SKIP_DEPRECATED_LOGS": "true", + "ENVOY_STATUS_PORT": 15021, + "GENERATOR": "grpc", + "INSTANCE_IPS": "10.244.0.57", + "INTERCEPTION_MODE": "REDIRECT", + "ISTIO_PROXY_SHA": "dd6dd2104dc107fd5f5da434f1a0424ec1099943", + "ISTIO_VERSION": "1.28-dev", + "LABELS": { + "app": "details", + "security.istio.io/tlsMode": "istio", + "service.istio.io/canonical-name": "details", + "service.istio.io/canonical-revision": "v1", + "version": "v1" + }, + "MESH_ID": "cluster.local", + "METADATA_DISCOVERY": "false", + "NAME": "details-v1-766844796b-gz9gr", + "NAMESPACE": "default", + "NODE_NAME": "envoy-gateway-control-plane", + "OWNER": "kubernetes://apis/apps/v1/namespaces/default/deployments/details-v1", + "PILOT_SAN": [ + "istiod.istio-system.svc" + ], + "POD_PORTS": "[{\"containerPort\":9080,\"protocol\":\"TCP\"}]", + "PROXY_CONFIG": { + "binaryPath": "/usr/local/bin/orion", + "concurrency": 2, + "configPath": "./etc/istio/proxy", + "controlPlaneAuthPolicy": "MUTUAL_TLS", + "discoveryAddress": "istiod.istio-system.svc:15012", + "drainDuration": "45s", + "proxyAdminPort": 15000, + "serviceCluster": "istio-proxy", + "statNameLength": 189, + "statusPort": 15020, + 
"terminationDrainDuration": "5s" + }, + "SERVICE_ACCOUNT": "bookinfo-details", + "WORKLOAD_IDENTITY_SOCKET_FILE": "socket", + "WORKLOAD_NAME": "details-v1" + }, + "locality": {}, + "UserAgentVersionType": null + }, + "server_listener_resource_name_template": "xds.istio.io/grpc/lds/inbound/%s" +} + + +k cp -c istio-proxy details-v1-766844796b-zj22b:/var/run/secrets/istio/root-cert.pem . + + +/usr/local/bin/pilot-discovery discovery --monitoringAddr=:15014 --log_output_level=default:info --domain cluster.local --keepaliveMaxServerConnectionAge 30m +./out/linux_amd64/pilot-discovery discovery --monitoringAddr=:15014 --log_output_level=all:debug --domain cluster.local --keepaliveMaxServerConnectionAge 30m + + + +Istio IPTables and Kind + +docker exec into kind cluster +find pod +root@envoy-gateway-control-plane:/# ps auxf // search for pid of orion +sudo nsenter -t ${PROXY_PID} -n iptables-save + +*nat +:PREROUTING ACCEPT [810:48600] +:INPUT ACCEPT [817:49020] +:OUTPUT ACCEPT [322:21250] +:POSTROUTING ACCEPT [0:0] +:ISTIO_INBOUND - [0:0] +:ISTIO_IN_REDIRECT - [0:0] +:ISTIO_OUTPUT - [0:0] +:ISTIO_REDIRECT - [0:0] +-A PREROUTING -p tcp -j ISTIO_INBOUND +-A OUTPUT -j ISTIO_OUTPUT +-A ISTIO_INBOUND -p tcp -m tcp --dport 15008 -j RETURN +-A ISTIO_INBOUND -p tcp -m tcp --dport 15090 -j RETURN +-A ISTIO_INBOUND -p tcp -m tcp --dport 15021 -j RETURN +-A ISTIO_INBOUND -p tcp -m tcp --dport 15020 -j RETURN +-A ISTIO_INBOUND -p tcp -j ISTIO_IN_REDIRECT +-A ISTIO_IN_REDIRECT -p tcp -j REDIRECT --to-ports 15006 +-A ISTIO_OUTPUT -s 127.0.0.6/32 -o lo -j RETURN +-A ISTIO_OUTPUT ! -d 127.0.0.1/32 -o lo -p tcp -m tcp ! --dport 15008 -m owner --uid-owner 1337 -j ISTIO_IN_REDIRECT +-A ISTIO_OUTPUT -o lo -m owner ! --uid-owner 1337 -j RETURN +-A ISTIO_OUTPUT -m owner --uid-owner 1337 -j RETURN +-A ISTIO_OUTPUT ! -d 127.0.0.1/32 -o lo -p tcp -m tcp ! --dport 15008 -m owner --gid-owner 1337 -j ISTIO_IN_REDIRECT +-A ISTIO_OUTPUT -o lo -m owner ! 
--gid-owner 1337 -j RETURN +-A ISTIO_OUTPUT -m owner --gid-owner 1337 -j RETURN +-A ISTIO_OUTPUT -d 127.0.0.1/32 -j RETURN +-A ISTIO_OUTPUT -j ISTIO_REDIRECT +-A ISTIO_REDIRECT -p tcp -j REDIRECT --to-ports 15001 \ No newline at end of file diff --git a/envoy-data-plane-api/build.rs b/envoy-data-plane-api/build.rs index deb523bc..7314e216 100644 --- a/envoy-data-plane-api/build.rs +++ b/envoy-data-plane-api/build.rs @@ -6,6 +6,8 @@ use glob::glob; fn main() -> std::io::Result<()> { let descriptor_path = PathBuf::from(std::env::var("OUT_DIR").unwrap()).join("proto_descriptor.bin"); let protos: Vec = glob("data-plane-api/envoy/**/v3/*.proto").unwrap().filter_map(Result::ok).collect(); + let istio_protos: Vec = glob("udpa/udpa/type/v1/*.proto").unwrap().filter_map(Result::ok).collect(); + let protos: Vec = protos.into_iter().chain(istio_protos).collect(); let include_paths = [ "data-plane-api/", @@ -17,6 +19,7 @@ fn main() -> std::io::Result<()> { "prometheus-client-model/", "cel-spec/proto", "protobuf/src/", + "udpa/udpa/type/v1", ]; let mut config = prost_build::Config::new(); diff --git a/envoy-data-plane-api/udpa b/envoy-data-plane-api/udpa new file mode 160000 index 00000000..c52dc94e --- /dev/null +++ b/envoy-data-plane-api/udpa @@ -0,0 +1 @@ +Subproject commit c52dc94e7fbe6449d8465faaeda22c76ca62d4ff diff --git a/orion-configuration/src/config.rs b/orion-configuration/src/config.rs index 6e7714fc..4ab985a4 100644 --- a/orion-configuration/src/config.rs +++ b/orion-configuration/src/config.rs @@ -85,11 +85,22 @@ pub fn deserialize_yaml(path: &Path) -> Result { serde_path_to_error::deserialize(serde_yaml::Deserializer::from_reader(&file)).map_err(crate::Error::from) } +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct ConfigSource { + pub config_source_specifier: ConfigSourceSpecifier, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub enum ConfigSourceSpecifier { + ADS, +} + #[cfg(feature = "envoy-conversions")] 
mod envoy_conversions { use std::path::Path; use super::{deserialize_yaml, log::AccessLogConfig, Bootstrap, Config}; + use crate::config::common::envoy_conversions::IsUsed; use crate::{ config::{log::LogConfig, runtime::Runtime}, options::Options, @@ -97,9 +108,15 @@ mod envoy_conversions { }; use orion_data_plane_api::decode::from_serde_deserializer; pub use orion_data_plane_api::envoy_data_plane_api::envoy::config::bootstrap::v3::Bootstrap as EnvoyBootstrap; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::{ + config_source::ConfigSourceSpecifier as EnvoyConfigSourceSpecifier, AggregatedConfigSource, + ConfigSource as EnvoyConfigSource, + }; use orion_error::{Context, ErrorInfo}; use serde::Deserialize; + use crate::config::{self, convert_opt, unsupported_field, ConfigSource, ConfigSourceSpecifier, GenericError}; + #[derive(Deserialize)] struct Wrapper(#[serde(deserialize_with = "from_serde_deserializer")] EnvoyBootstrap); @@ -164,6 +181,43 @@ mod envoy_conversions { Ok(config.apply_options(opt)) } } + + impl TryFrom for ConfigSource { + type Error = GenericError; + fn try_from( + value: EnvoyConfigSource, + ) -> std::result::Result { + let EnvoyConfigSource { + authorities, + initial_fetch_timeout: _, + resource_api_version: _, + config_source_specifier, + } = value; + unsupported_field!(authorities)?; + let config_source_specifier = convert_opt!(config_source_specifier)?; + Ok(Self { config_source_specifier }) + } + } + + impl TryFrom for ConfigSourceSpecifier { + type Error = GenericError; + fn try_from( + value: EnvoyConfigSourceSpecifier, + ) -> std::result::Result { + match value { + EnvoyConfigSourceSpecifier::Ads(AggregatedConfigSource {}) => Ok(Self::ADS), + EnvoyConfigSourceSpecifier::ApiConfigSource(_) => { + Err(GenericError::unsupported_variant("ApiConfigSource")) + }, + EnvoyConfigSourceSpecifier::Path(_) => Err(GenericError::unsupported_variant("Path")), + EnvoyConfigSourceSpecifier::PathConfigSource(_) => { + 
Err(GenericError::unsupported_variant("PathConfigSource")) + }, + EnvoyConfigSourceSpecifier::Self_(_) => Err(GenericError::unsupported_variant("Self_")), + } + } + } + #[cfg(test)] mod tests { use crate::{config::Config, options::Options, Result}; diff --git a/orion-configuration/src/config/bootstrap.rs b/orion-configuration/src/config/bootstrap.rs index 567e1162..f7c78579 100644 --- a/orion-configuration/src/config/bootstrap.rs +++ b/orion-configuration/src/config/bootstrap.rs @@ -48,10 +48,20 @@ impl Bootstrap { } } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub struct Node { pub id: CompactString, pub cluster_id: CompactString, + #[serde(skip_serializing, skip_deserializing)] + pub metadata: Option, +} + +impl Eq for Node {} + +impl PartialEq for Node { + fn eq(&self, other: &Self) -> bool { + self.id == other.id && self.cluster_id == other.cluster_id + } } impl Node { @@ -126,13 +136,13 @@ mod envoy_conversions { hds_config, flags_path, stats_sinks, - deferred_stat_options, - stats_config, + deferred_stat_options: _, + stats_config: _, stats_flush_interval, watchdog, watchdogs, tracing, - layered_runtime, + layered_runtime: _, admin, overload_manager, enable_dispatcher_stats, @@ -141,7 +151,7 @@ mod envoy_conversions { use_tcp_for_dns_lookups, dns_resolution_config, typed_dns_resolver_config, - bootstrap_extensions, + bootstrap_extensions: _, fatal_actions, config_sources, default_config_source, @@ -153,7 +163,7 @@ mod envoy_conversions { xds_delegate_extension, xds_config_tracker_extension, listener_manager, - application_log_config, + application_log_config: _, grpc_async_client_manager_config, stats_flush, memory_allocator_manager, @@ -168,13 +178,13 @@ mod envoy_conversions { hds_config, flags_path, // stats_sinks, - deferred_stat_options, - stats_config, + //deferred_stat_options, + //stats_config, // stats_flush_interval, watchdog, watchdogs, tracing, - 
layered_runtime, + //layered_runtime, //admin, overload_manager, enable_dispatcher_stats, @@ -183,7 +193,7 @@ mod envoy_conversions { use_tcp_for_dns_lookups, dns_resolution_config, typed_dns_resolver_config, - bootstrap_extensions, + //bootstrap_extensions, fatal_actions, config_sources, default_config_source, @@ -195,7 +205,7 @@ mod envoy_conversions { xds_delegate_extension, xds_config_tracker_extension, listener_manager, - application_log_config, + //application_log_config, grpc_async_client_manager_config, stats_flush, memory_allocator_manager @@ -231,9 +241,9 @@ mod envoy_conversions { cluster, metadata, dynamic_parameters, - locality, + locality: _, user_agent_name, - extensions, + extensions: _, client_features, listening_addresses, user_agent_version_type, @@ -241,53 +251,54 @@ mod envoy_conversions { unsupported_field!( // id, //cluster, - metadata, + //metadata, dynamic_parameters, - locality, + //locality, user_agent_name, - extensions, + //extensions, client_features, listening_addresses, user_agent_version_type )?; + let id = required!(id)?.into(); let cluster = required!(cluster)?.into(); - Ok(Self { id, cluster_id: cluster }) + Ok(Self { id, cluster_id: cluster, metadata }) } } impl TryFrom for DynamicResources { type Error = GenericError; fn try_from(value: EnvoyDynamicResources) -> Result { let EnvoyDynamicResources { - lds_config, + lds_config: _, lds_resources_locator, - cds_config, + cds_config: _, cds_resources_locator, ads_config, } = value; - unsupported_field!(lds_config, lds_resources_locator, cds_config, cds_resources_locator)?; + unsupported_field!(lds_resources_locator, cds_resources_locator)?; let EnvoyApiConfigSource { - api_type, - transport_api_version, - cluster_names, + api_type: _, + transport_api_version: _, + cluster_names: _, grpc_services, refresh_delay, request_timeout, rate_limit_settings, - set_node_on_first_message_only, + set_node_on_first_message_only: _, config_validators, } = required!(ads_config)?; let 
grpc_cluster_specifiers = (|| -> Result<_, GenericError> { unsupported_field!( //todo(hayley): are these required to be set? - api_type, - transport_api_version, - cluster_names, + // api_type, + // transport_api_version, + // cluster_names, // grpc_services, refresh_delay, request_timeout, rate_limit_settings, - set_node_on_first_message_only, + //set_node_on_first_message_only, config_validators )?; (|| -> Result<_, GenericError> { @@ -343,14 +354,14 @@ mod envoy_conversions { type Error = GenericError; fn try_from(envoy: EnvoyAdmin) -> Result { let EnvoyAdmin { - access_log, + access_log: _, access_log_path, - profile_path, + profile_path: _, address, socket_options, - ignore_global_conn_limit, + ignore_global_conn_limit: _, } = envoy; - unsupported_field!(access_log, access_log_path, profile_path, socket_options, ignore_global_conn_limit)?; + unsupported_field!(access_log_path, socket_options)?; let address = match required!(address)? .address .ok_or(GenericError::MissingField("address is mandatory to setup admin interface"))? 
diff --git a/orion-configuration/src/config/cluster.rs b/orion-configuration/src/config/cluster.rs index c04beb02..70c0644f 100644 --- a/orion-configuration/src/config/cluster.rs +++ b/orion-configuration/src/config/cluster.rs @@ -25,16 +25,17 @@ pub use http_protocol_options::HttpProtocolOptions; pub mod cluster_specifier; pub use cluster_specifier::ClusterSpecifier; -use crate::config::core::Address; + +use crate::config::{core::Address, transport::BindDeviceOptions, ConfigSource}; use super::{ common::{is_default, MetadataKey}, secret::TlsCertificate, - transport::{BindDevice, CommonTlsValidationContext, TlsParameters, UpstreamTransportSocketConfig}, + transport::{CommonTlsValidationContext, TlsParameters, UpstreamTransportSocketConfig}, }; use compact_str::CompactString; -use http::HeaderName; +use http::{HeaderName}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::{fmt::Display, num::NonZeroU32, time::Duration}; #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] @@ -44,8 +45,8 @@ pub struct Cluster { pub discovery_settings: ClusterDiscoveryType, #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] pub transport_socket: Option, - #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] - pub bind_device: Option, + #[serde(default = "Default::default")] + pub bind_device_options: BindDeviceOptions, #[serde(skip_serializing_if = "is_default", default)] pub load_balancing_policy: LbPolicy, #[serde(skip_serializing_if = "is_default", default)] @@ -67,6 +68,15 @@ pub struct ClusterLoadAssignment { deserialize_with = "deser_through::" )] pub endpoints: Vec, + pub cluster_name: String, +} + + + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct EdsClusterConfig { + pub service_name: String, + pub config_source: Option, } fn simplify_locality_lb_endpoints( @@ -182,7 +192,7 @@ pub enum ClusterDiscoveryType { // serializable type when returning the EDS cluster 
running configuration // through admin config_dump API #[serde(rename = "EDS")] - Eds(Option), + Eds(Option, Option), #[serde(rename = "ORIGINAL_DST")] OriginalDst(OriginalDstConfig), } @@ -255,12 +265,9 @@ mod envoy_conversions { LocalityLbEndpoints, OriginalDstConfig, OriginalDstRoutingMethod, TlsConfig, TlsSecret, }; use crate::config::{ - common::*, - core::Address, - transport::{ - BindDevice, CommonTlsContext, Secrets, SupportedEnvoyTransportSocket, UpstreamTransportSocketConfig, - }, - util::duration_from_envoy, + cluster::EdsClusterConfig, common::*, core::Address, transport::{ + BindAddress, BindDeviceOptions, CommonTlsContext, Secrets, SupportedEnvoyTransportSocket, UpstreamTransportSocketConfig + }, util::duration_from_envoy, ConfigSource }; use compact_str::CompactString; use orion_data_plane_api::envoy_data_plane_api::{ @@ -268,8 +275,7 @@ mod envoy_conversions { config::{ cluster::v3::{ cluster::{ - ClusterDiscoveryType as EnvoyClusterDiscoveryType, DiscoveryType as EnvoyDiscoveryType, - LbConfig as EnvoyLbConfig, LbPolicy as EnvoyLbPolicy, + ClusterDiscoveryType as EnvoyClusterDiscoveryType, DiscoveryType as EnvoyDiscoveryType, EdsClusterConfig as EnvoyEdsClusterConfig , LbConfig as EnvoyLbConfig, LbPolicy as EnvoyLbPolicy }, Cluster as EnvoyCluster, }, @@ -290,23 +296,24 @@ mod envoy_conversions { }; use http::HeaderName; + use tracing::warn; use std::{collections::BTreeSet, num::NonZeroU32}; impl TryFrom for Cluster { type Error = GenericError; fn try_from(envoy: EnvoyCluster) -> Result { let EnvoyCluster { - transport_socket_matches, + transport_socket_matches: _, name, - alt_stat_name, + alt_stat_name: _, eds_cluster_config, connect_timeout, per_connection_buffer_limit_bytes, lb_policy, load_assignment, health_checks, - max_requests_per_connection, - circuit_breakers, + max_requests_per_connection: _, + circuit_breakers: _, upstream_http_protocol_options, common_http_protocol_options, http_protocol_options, @@ -325,14 +332,14 @@ mod 
envoy_conversions { cleanup_interval, upstream_bind_config, lb_subset_config, - common_lb_config, + common_lb_config: _, transport_socket, - metadata, + metadata: _, protocol_selection, - upstream_connection_options, + upstream_connection_options: _, close_connections_on_host_health_failure, ignore_health_on_host_removal, - filters, + filters: _, load_balancing_policy, lrs_server, track_timeout_budgets, @@ -348,17 +355,17 @@ mod envoy_conversions { let name = required!(name)?; (|| -> Result { unsupported_field!( - transport_socket_matches, + //transport_socket_matches, // name, - alt_stat_name, - eds_cluster_config, + //alt_stat_name, + //eds_cluster_config, // connect_timeout, per_connection_buffer_limit_bytes, // lb_policy, // load_assignment, // health_checks, - max_requests_per_connection, - circuit_breakers, + //max_requests_per_connection, + //circuit_breakers, upstream_http_protocol_options, common_http_protocol_options, http_protocol_options, @@ -377,14 +384,14 @@ mod envoy_conversions { // cleanup_interval, // upstream_bind_config, lb_subset_config, - common_lb_config, + //common_lb_config, // transport_socket, - metadata, + //metadata, protocol_selection, - upstream_connection_options, + //upstream_connection_options, close_connections_on_host_health_failure, ignore_health_on_host_removal, - filters, + //filters, load_balancing_policy, lrs_server, track_timeout_budgets, @@ -476,20 +483,32 @@ mod envoy_conversions { } } + let load_assignment = load_assignment.map(ClusterLoadAssignment::try_from); + let mut cla = match load_assignment{ + Some(Ok(cla)) => cla, + Some(Err(e))=> return Err(e), + None => ClusterLoadAssignment{ endpoints: vec![], cluster_name: String::new() }, + }; + + cla.cluster_name = name.to_string(); + let discovery_settings = ClusterDiscoveryType::try_from(( discovery_type, - load_assignment.map(ClusterLoadAssignment::try_from).transpose().with_node("load_assignment")?, + Some(cla), original_dst_config, + eds_cluster_config )) 
.with_node("cluster_discovery_type")?; //fixme(hayley): the envoy protobuf documentation says: // > If the address and port are empty, no bind will be performed. - // but its unclear what adress this is refering to. For now we will always bind. - let bind_device = upstream_bind_config - .map(bind_device_from_bind_config) - .transpose() - .with_node("upstream_bind_config")? - .flatten(); + // but its unclear what adress this is refering to. For now we will always bind. + let bind_device_options = if let Some(config) = upstream_bind_config{ + bind_device_from_bind_config(config)? + }else{ + BindDeviceOptions::default() + }; + + let transport_socket = transport_socket .map(UpstreamTransportSocketConfig::try_from) .transpose() @@ -573,7 +592,7 @@ mod envoy_conversions { Ok(Self { name, discovery_settings, - bind_device, + bind_device_options, transport_socket, load_balancing_policy, http_protocol_options, @@ -592,7 +611,7 @@ mod envoy_conversions { let EnvoyClusterLoadAssignment { cluster_name, endpoints, named_endpoints, policy } = value; unsupported_field!(named_endpoints, policy)?; let ret = (|| -> Result<_, _> { - let endpoints: Vec = convert_non_empty_vec!(endpoints)?; + let endpoints: Vec = convert_vec!(endpoints)?; if !endpoints.is_empty() { let set_of_priorities = endpoints.iter().map(|e| e.priority).collect::>(); let n_entries = set_of_priorities.len(); @@ -605,7 +624,7 @@ mod envoy_conversions { .with_node("endpoints"); } } - Ok(Self { endpoints }) + Ok(Self { cluster_name: cluster_name.clone(), endpoints }) })(); if !cluster_name.is_empty() { return ret.with_name(cluster_name); @@ -618,15 +637,15 @@ mod envoy_conversions { type Error = GenericError; fn try_from(value: EnvoyLocalityLbEndpoints) -> Result { let EnvoyLocalityLbEndpoints { - locality, + locality:_, lb_endpoints, - load_balancing_weight, + load_balancing_weight:_, priority, proximity, lb_config, - metadata, + metadata:_, } = value; - unsupported_field!(locality, load_balancing_weight, proximity, 
lb_config, metadata)?; + unsupported_field!(proximity, lb_config)?; let lb_endpoints: Vec = convert_non_empty_vec!(lb_endpoints)?; let mut sum = 0u32; for lb_endpoint in &lb_endpoints { @@ -659,11 +678,27 @@ mod envoy_conversions { } } + impl TryFrom for EdsClusterConfig{ + type Error = GenericError; + + fn try_from(value: EnvoyEdsClusterConfig) -> Result { + let config_source = if let Some(c) = value.eds_config{ + Some(ConfigSource::try_from(c)?) + }else{ + None + }; + Ok(EdsClusterConfig{ + service_name: value.service_name, + config_source, + }) + } + } + impl TryFrom for LbEndpoint { type Error = GenericError; fn try_from(value: EnvoyLbEndpoint) -> Result { - let EnvoyLbEndpoint { health_status, metadata, load_balancing_weight, host_identifier } = value; - unsupported_field!(metadata)?; + let EnvoyLbEndpoint { health_status, metadata: _istio_ignore, load_balancing_weight, host_identifier } = value; + let address = match required!(host_identifier)? { EnvoyHostIdentifier::Endpoint(EnvoyEndpoint { address, @@ -686,17 +721,25 @@ mod envoy_conversions { } } - impl TryFrom<(EnvoyDiscoveryType, Option, Option)> for ClusterDiscoveryType { + impl TryFrom<(EnvoyDiscoveryType, Option, Option, Option)> for ClusterDiscoveryType { type Error = GenericError; fn try_from( - (discovery, cla, odc): (EnvoyDiscoveryType, Option, Option), + (discovery, cla, odc, ecc): (EnvoyDiscoveryType, Option, Option, Option), ) -> Result { match (discovery, cla) { (EnvoyDiscoveryType::Static, Some(cla)) => { if cla .endpoints .iter() - .flat_map(|e| e.lb_endpoints.iter().map(|e| e.address.clone().into_addr()).collect::>()) + .flat_map(|e| { + e.lb_endpoints + .iter() + .map(|e| match e.address { + Address::Socket(_, _) => e.address.clone().into_addr().and(Ok(())), + Address::Pipe(_, _) => Ok(()), + }) + .collect::>() + }) .filter(std::result::Result::is_err) .collect::>() .is_empty() @@ -709,11 +752,19 @@ mod envoy_conversions { } }, (EnvoyDiscoveryType::Static, None) => 
Err(GenericError::from_msg( - "Static clusters are required to have a cluster load assignment configured", - )), - (EnvoyDiscoveryType::Eds, None) => Ok(Self::Eds(None)), - (EnvoyDiscoveryType::Eds, Some(_)) => { - Err(GenericError::from_msg("EDS clusters can't have a static cluster load assignment configured")) + "Static clusters are required to have a cluster load assignment configured")), + + (EnvoyDiscoveryType::Eds, cla) => { + if let Some(cla) = cla{ + warn!("Creating EDS cluster and skipping static endpoints {cla:?}"); + } + if let Some(ecc) = ecc{ + Ok(Self::Eds(None, Some(EdsClusterConfig::try_from(ecc)?))) + }else{ + Ok(Self::Eds(None, None)) + } + + }, (EnvoyDiscoveryType::LogicalDns, _) => Err(GenericError::unsupported_variant("LogicalDns")), (EnvoyDiscoveryType::StrictDns, Some(cla)) => Ok(ClusterDiscoveryType::StrictDns(cla)), @@ -734,7 +785,7 @@ mod envoy_conversions { } //todo(hayley): refactor this to a trait impl when splitting the envoy conversions out of this crate - fn bind_device_from_bind_config(value: EnvoyBindConfig) -> Result, GenericError> { + fn bind_device_from_bind_config(value: EnvoyBindConfig) -> Result { let EnvoyBindConfig { source_address, freebind, @@ -744,7 +795,7 @@ mod envoy_conversions { local_address_selector, } = value; unsupported_field!( - source_address, + //source_address, freebind, // socket_options, extra_source_addresses, @@ -752,10 +803,19 @@ mod envoy_conversions { local_address_selector )?; let bind_device = convert_vec!(socket_options)?; + + let address = if let Some(address) = source_address{ + Some(TryFrom::try_from(address)?)
+ }else{ + None + }; + let bind_address = address.map(|a| BindAddress{address:a}); + if bind_device.len() > 1 { return Err(GenericError::from_msg("at most one bind device is supported")).with_node("socket_options"); } - Ok(bind_device.into_iter().next()) + let bind_device = bind_device.into_iter().next(); + Ok(BindDeviceOptions{bind_device,bind_address,..Default::default()}) } impl TryFrom for UpstreamTransportSocketConfig { diff --git a/orion-configuration/src/config/cluster/http_protocol_options.rs b/orion-configuration/src/config/cluster/http_protocol_options.rs index df551e96..971558f0 100644 --- a/orion-configuration/src/config/cluster/http_protocol_options.rs +++ b/orion-configuration/src/config/cluster/http_protocol_options.rs @@ -61,6 +61,7 @@ pub struct CommonHttpOptions { #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] enum UpstreamHttpProtocolOptions { Explicit(ExplicitProtocolOptions), + UseDownstream(UseDownstreamProtocolOptions), } impl Default for UpstreamHttpProtocolOptions { @@ -76,6 +77,13 @@ enum ExplicitProtocolOptions { Http2(Http2ProtocolOptions), } +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(tag = "http_version", rename_all = "UPPERCASE")] +enum UseDownstreamProtocolOptions { + Http1(Http1ProtocolOptions), + Http2(Http2ProtocolOptions), +} + impl Default for ExplicitProtocolOptions { fn default() -> Self { Self::Http1(Http1ProtocolOptions) @@ -127,7 +135,9 @@ mod envoy_conversions { Codec, CommonHttpOptions, ExplicitProtocolOptions, Http1ProtocolOptions, Http2KeepAliveSettings, Http2ProtocolOptions, HttpProtocolOptions, UpstreamHttpProtocolOptions, }; - use crate::config::{common::*, util::duration_from_envoy}; + use crate::config::{ + cluster::http_protocol_options::UseDownstreamProtocolOptions, common::*, util::duration_from_envoy, + }; use orion_data_plane_api::envoy_data_plane_api::{ envoy::{ config::core::v3::{ @@ -138,7 +148,7 @@ mod envoy_conversions { http_protocol_options::{ 
explicit_http_config::ProtocolConfig as EnvoyProtocolConfig, ExplicitHttpConfig as EnvoyExplicitHttpConfig, - UpstreamProtocolOptions as EnvoyUpstreamProtocolOptions, + UpstreamProtocolOptions as EnvoyUpstreamProtocolOptions, UseDownstreamHttpConfig, }, HttpProtocolOptions as EnvoyHttpProtocolOptions, }, @@ -193,18 +203,18 @@ mod envoy_conversions { idle_timeout, max_connection_duration, max_headers_count, - max_stream_duration, + max_stream_duration: _, headers_with_underscores_action, - max_requests_per_connection, + max_requests_per_connection: _, max_response_headers_kb, } = value; unsupported_field!( // idle_timeout, max_connection_duration, max_headers_count, - max_stream_duration, + //max_stream_duration, headers_with_underscores_action, - max_requests_per_connection, + //max_requests_per_connection, max_response_headers_kb )?; let idle_timeout = idle_timeout @@ -238,10 +248,12 @@ mod envoy_conversions { .unwrap_or_default(); let common = common_http_protocol_options.map(CommonHttpOptions::try_from).transpose()?.unwrap_or_default(); let (codec, http1_options, http2_options) = match upstream_protocol_options { - UpstreamHttpProtocolOptions::Explicit(ExplicitProtocolOptions::Http1(http1)) => { + UpstreamHttpProtocolOptions::Explicit(ExplicitProtocolOptions::Http1(http1)) + | UpstreamHttpProtocolOptions::UseDownstream(UseDownstreamProtocolOptions::Http1(http1)) => { (Codec::Http1, http1, Http2ProtocolOptions::default()) }, - UpstreamHttpProtocolOptions::Explicit(ExplicitProtocolOptions::Http2(http2)) => { + UpstreamHttpProtocolOptions::Explicit(ExplicitProtocolOptions::Http2(http2)) + | UpstreamHttpProtocolOptions::UseDownstream(UseDownstreamProtocolOptions::Http2(http2)) => { (Codec::Http2, Http1ProtocolOptions, http2) }, }; @@ -256,8 +268,8 @@ mod envoy_conversions { match value { EnvoyUpstreamProtocolOptions::ExplicitHttpConfig(envoy) => envoy.try_into().map(Self::Explicit), EnvoyUpstreamProtocolOptions::AutoConfig(_) => 
Err(GenericError::unsupported_variant("AutoConfig")), - EnvoyUpstreamProtocolOptions::UseDownstreamProtocolConfig(_) => { - Err(GenericError::unsupported_variant("UseDownstreamProtocolConfig")) + EnvoyUpstreamProtocolOptions::UseDownstreamProtocolConfig(envoy) => { + envoy.try_into().map(Self::UseDownstream) }, } } @@ -271,6 +283,20 @@ mod envoy_conversions { } } + impl TryFrom for UseDownstreamProtocolOptions { + type Error = GenericError; + fn try_from(value: UseDownstreamHttpConfig) -> Result { + let UseDownstreamHttpConfig { http_protocol_options, http2_protocol_options, http3_protocol_options: _ } = + value; + + match (http_protocol_options, http2_protocol_options) { + (None, None) => Err(GenericError::MissingField("No http options provided")), + (None | Some(_), Some(http2_config)) => http2_config.try_into().map(Self::Http2), + (Some(http1_config), None) => http1_config.try_into().map(Self::Http1), + } + } + } + impl TryFrom for ExplicitProtocolOptions { type Error = GenericError; fn try_from(value: EnvoyProtocolConfig) -> Result { diff --git a/orion-configuration/src/config/core.rs b/orion-configuration/src/config/core.rs index 93901d90..3e5aa2a5 100644 --- a/orion-configuration/src/config/core.rs +++ b/orion-configuration/src/config/core.rs @@ -266,7 +266,7 @@ pub mod envoy_conversions { } } - #[derive(Debug, Eq, PartialEq, Serialize, Deserialize, Clone)] + #[derive(Debug, Eq, PartialEq, Serialize, Deserialize, Clone, Hash)] pub enum Address { Socket(String, u16), Pipe(String, u32), diff --git a/orion-configuration/src/config/listener.rs b/orion-configuration/src/config/listener.rs index 276771da..b6e8b573 100644 --- a/orion-configuration/src/config/listener.rs +++ b/orion-configuration/src/config/listener.rs @@ -23,11 +23,11 @@ use super::{ access_log::{AccessLog, AccessLogConf}, HttpConnectionManager, NetworkRbac, TcpProxy, }, - transport::{BindDevice, CommonTlsContext}, + transport::CommonTlsContext, GenericError, }; -use crate::config::listener; use 
crate::config::network_filters::tracing::{TracingConfig, TracingKey}; +use crate::config::{listener, transport::BindDeviceOptions}; use compact_str::CompactString; use ipnet::IpNet; use serde::{Deserialize, Serialize, Serializer}; @@ -46,8 +46,8 @@ pub struct Listener { pub address: SocketAddr, #[serde(with = "serde_filterchains")] pub filter_chains: HashMap, - #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] - pub bind_device: Option, + #[serde(default = "Default::default")] + pub bind_device_options: BindDeviceOptions, #[serde(skip_serializing_if = "std::ops::Not::not", default)] pub with_tls_inspector: bool, #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] @@ -180,6 +180,27 @@ pub struct FilterChainMatch { pub source_prefix_ranges: Vec, #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] pub source_ports: Vec, + + pub transport_protocol: String, + pub application_protocols: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub enum DetectedTransportProtocol { + RawBuffer, + Ssl, +} + +impl TryFrom<&str> for DetectedTransportProtocol { + type Error = GenericError; + + fn try_from(value: &str) -> Result { + match value { + "raw_buffer" => Ok(Self::RawBuffer), + "ssl" => Ok(Self::Ssl), + _ => Err(GenericError::Message("Invalid value for DetectedTransportProtocol".into())), + } + } } #[derive(Debug, PartialEq, Eq, Clone, Copy)] @@ -266,6 +287,23 @@ impl FilterChainMatch { .unwrap_or(MatchResult::NoRule) } + pub fn matches_detected_transport_protocol( + &self, + detected_transport_protocol: DetectedTransportProtocol, + ) -> MatchResult { + if self.transport_protocol.is_empty() { + MatchResult::NoRule + } else if let Ok(transport_protocol) = DetectedTransportProtocol::try_from(self.transport_protocol.as_str()) { + if transport_protocol == detected_transport_protocol { + MatchResult::Matched(0) + } else { + MatchResult::FailedMatch + } + } else { + MatchResult::FailedMatch + 
} + } + pub fn matches_source_port(&self, source_port: u16) -> MatchResult { if self.source_ports.is_empty() { MatchResult::NoRule @@ -320,6 +358,7 @@ mod envoy_conversions { use std::str::FromStr; use super::{FilterChain, FilterChainMatch, Listener, MainFilter, ServerNameMatch, TlsConfig}; + use crate::config::transport::BindDeviceOptions; use crate::config::{ common::*, core::{Address, CidrRange}, @@ -348,6 +387,7 @@ mod envoy_conversions { google::protobuf::Any, prost::Message, }; + use tracing::warn; impl TryFrom for Listener { type Error = GenericError; @@ -359,33 +399,33 @@ mod envoy_conversions { stat_prefix, filter_chains, filter_chain_matcher, - use_original_dst, - default_filter_chain, + use_original_dst: _, + default_filter_chain: _, per_connection_buffer_limit_bytes, metadata, deprecated_v1, drain_type, listener_filters, - listener_filters_timeout, - continue_on_listener_filters_timeout, + listener_filters_timeout: _, + continue_on_listener_filters_timeout: _, transparent, freebind, socket_options, tcp_fast_open_queue_length, - traffic_direction, + traffic_direction: _, udp_listener_config, api_listener, connection_balance_config, reuse_port, enable_reuse_port, - access_log, + access_log: _, tcp_backlog_size, - max_connections_to_accept_per_socket_event, + max_connections_to_accept_per_socket_event: _, bind_to_port, enable_mptcp, - ignore_global_conn_limit, + ignore_global_conn_limit: _, listener_specifier, - bypass_overload_manager, + bypass_overload_manager: _, fcds_config, } = envoy; unsupported_field!( @@ -395,45 +435,46 @@ mod envoy_conversions { stat_prefix, // filter_chains, filter_chain_matcher, - use_original_dst, - default_filter_chain, + //use_original_dst, + //default_filter_chain, per_connection_buffer_limit_bytes, metadata, deprecated_v1, drain_type, // listener_filters, - listener_filters_timeout, - continue_on_listener_filters_timeout, + // listener_filters_timeout, + //continue_on_listener_filters_timeout, transparent, freebind, // 
socket_options, tcp_fast_open_queue_length, - traffic_direction, + //traffic_direction, udp_listener_config, api_listener, connection_balance_config, reuse_port, enable_reuse_port, - access_log, + //access_log, tcp_backlog_size, - max_connections_to_accept_per_socket_event, - bind_to_port, + //max_connections_to_accept_per_socket_event, enable_mptcp, - ignore_global_conn_limit, + // ignore_global_conn_limit, listener_specifier, - bypass_overload_manager, + //bypass_overload_manager, fcds_config )?; let name: CompactString = required!(name)?.into(); (|| -> Result<_, GenericError> { let name = name.clone(); let address = Address::into_addr(convert_opt!(address)?)?; + let envoy_filter_chains = filter_chains.clone(); let filter_chains: Vec = convert_non_empty_vec!(filter_chains)?; let n_filter_chains = filter_chains.len(); let filter_chains: HashMap<_, _> = filter_chains.into_iter().map(|x| x.0).collect(); // This is a hard requirement from Envoy as otherwise it can't pick which filterchain to use. 
if filter_chains.len() != n_filter_chains { + warn!("Duplicate filter chains {:?}", envoy_filter_chains); return Err(GenericError::from_msg("filter chain contains duplicate filter_chain_match entries") .with_node("filter_chains")); } @@ -457,6 +498,7 @@ mod envoy_conversions { } proxy_protocol_config = Some(config); }, + ListenerFilterConfig::Ignored => (), } } let bind_device = convert_vec!(socket_options)?; @@ -465,7 +507,19 @@ mod envoy_conversions { .with_node("socket_options"); } let bind_device = bind_device.into_iter().next(); - Ok(Self { name, address, filter_chains, bind_device, with_tls_inspector, proxy_protocol_config }) + + Ok(Self { + name, + address, + filter_chains, + bind_device_options: BindDeviceOptions { + bind_device, + bind_to_port: bind_to_port.map(|v| v.value), + ..Default::default() + }, + with_tls_inspector, + proxy_protocol_config, + }) }()) .with_name(name) } @@ -482,17 +536,17 @@ mod envoy_conversions { use_proxy_proto, metadata, transport_socket, - transport_socket_connect_timeout, + transport_socket_connect_timeout: _, name, } = envoy; unsupported_field!( // filter_chain_match, // filters, use_proxy_proto, - metadata, - // transport_socket, - transport_socket_connect_timeout // name, + metadata // transport_socket, + //transport_socket_connect_timeout // name, )?; + let name = if name.is_empty() { "filter_chain".to_owned() } else { name }; let name: CompactString = required!(name)?.into(); (|| -> Result<_, GenericError> { let name = name.clone(); @@ -554,6 +608,7 @@ mod envoy_conversions { } } }, + SupportedEnvoyFilter::Ignored => Ok(()), }, Err(e) => Err(e), } @@ -605,12 +660,11 @@ mod envoy_conversions { address_suffix, suffix_len, direct_source_prefix_ranges, - source_type, - // source_prefix_ranges, - // source_ports, - // server_names, - transport_protocol, - application_protocols + source_type // source_prefix_ranges, + // source_ports, + // server_names, + //transport_protocol, + //application_protocols )?; let server_names = 
server_names .into_iter() @@ -635,7 +689,15 @@ mod envoy_conversions { .map(|envoy| CidrRange::try_from(envoy).map(CidrRange::into_ipnet)) .collect::>() .with_node("source_prefix_ranges")?; - Ok(Self { server_names, destination_port, source_ports, destination_prefix_ranges, source_prefix_ranges }) + Ok(Self { + server_names, + destination_port, + source_ports, + destination_prefix_ranges, + source_prefix_ranges, + transport_protocol, + application_protocols, + }) } } @@ -674,6 +736,7 @@ mod envoy_conversions { HttpConnectionManager(EnvoyHttpConnectionManager), NetworkRbac(EnvoyNetworkRbac), TcpProxy(EnvoyTcpProxy), + Ignored, } impl TryFrom for SupportedEnvoyFilter { @@ -689,8 +752,14 @@ mod envoy_conversions { "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy" => { EnvoyTcpProxy::decode(typed_config.value.as_slice()).map(Self::TcpProxy) }, + "type.googleapis.com/stats.PluginConfig" | "type.googleapis.com/udpa.type.v1.TypedStruct"=> { + Ok(Self::Ignored) + } _ => { - return Err(GenericError::unsupported_variant(typed_config.type_url)); + return Err(GenericError::unsupported_variant(format!( + "Supported Envoy Filter unsupported variant {}", + typed_config.type_url + ))) }, } .map_err(|e| { diff --git a/orion-configuration/src/config/listener_filters.rs b/orion-configuration/src/config/listener_filters.rs index 6ad259ba..99172fb6 100644 --- a/orion-configuration/src/config/listener_filters.rs +++ b/orion-configuration/src/config/listener_filters.rs @@ -30,6 +30,7 @@ pub struct ListenerFilter { pub enum ListenerFilterConfig { TlsInspector, ProxyProtocol(DownstreamProxyProtocolConfig), + Ignored, } #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)] @@ -66,10 +67,12 @@ mod envoy_conversions { google::protobuf::Any, prost::Message, }; + use tracing::info; #[derive(Debug, Clone)] enum SupportedEnvoyListenerFilter { TlsInspector(EnvoyTlsInspector), ProxyProtocol(EnvoyProxyProtocol), + Ignored, } impl TryFrom for 
SupportedEnvoyListenerFilter { @@ -82,8 +85,20 @@ mod envoy_conversions { "type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol" => { EnvoyProxyProtocol::decode(typed_config.value.as_slice()).map(Self::ProxyProtocol) }, + "type.googleapis.com/udpa.type.v1.TypedStruct" + | "type.googleapis.com/stats.PluginConfig" + | "type.googleapis.com/envoy.extensions.filters.listener.http_inspector.v3.HttpInspector" + | "type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" => { + info!("Ignored Istio type {}", typed_config.type_url); + Ok(SupportedEnvoyListenerFilter::Ignored) + }, + + _ => { - return Err(GenericError::unsupported_variant(typed_config.type_url)); + return Err(GenericError::unsupported_variant(format!( + "Listener filter unsupported variant {}", + typed_config.type_url + ))); }, } .map_err(|e| { @@ -104,8 +119,8 @@ mod envoy_conversions { impl TryFrom for ListenerFilter { type Error = GenericError; fn try_from(envoy: EnvoyListenerFilter) -> Result { - let EnvoyListenerFilter { name, filter_disabled, config_type } = envoy; - unsupported_field!(filter_disabled)?; + let EnvoyListenerFilter { name, filter_disabled: _istio_ignore, config_type } = envoy; + //unsupported_field!(filter_disabled)?; let name: CompactString = required!(name)?.into(); (|| -> Result<_, GenericError> { let config = match required!(config_type) { @@ -130,12 +145,12 @@ mod envoy_conversions { match value { SupportedEnvoyListenerFilter::TlsInspector(EnvoyTlsInspector { enable_ja3_fingerprinting, - initial_read_buffer_size, + initial_read_buffer_size: _istio_ignore, enable_ja4_fingerprinting, }) => { // both fields are optional, and unsupported, but serde_yaml requires that at least one field is populated // so allow for enable_ja3_fingerprinting: false - unsupported_field!(initial_read_buffer_size, enable_ja4_fingerprinting)?; + unsupported_field!(enable_ja4_fingerprinting)?; if enable_ja3_fingerprinting.is_some_and(|b| b.value) { 
return Err(GenericError::UnsupportedField("enable_ja3_fingerprinting")); } @@ -145,6 +160,7 @@ mod envoy_conversions { let config = DownstreamProxyProtocolConfig::try_from(envoy_proxy_protocol)?; Ok(Self::ProxyProtocol(config)) }, + SupportedEnvoyListenerFilter::Ignored => Ok(Self::Ignored), } } } diff --git a/orion-configuration/src/config/network_filters/access_log.rs b/orion-configuration/src/config/network_filters/access_log.rs index 7ce9273f..3895df29 100644 --- a/orion-configuration/src/config/network_filters/access_log.rs +++ b/orion-configuration/src/config/network_filters/access_log.rs @@ -46,6 +46,7 @@ use orion_data_plane_api::envoy_data_plane_api::{ use orion_format::{LogFormatter, DEFAULT_ACCESS_LOG_FORMAT}; use serde::{Deserialize, Serialize}; +use tracing::debug; use crate::config::{common::*, core::DataSource}; @@ -224,10 +225,12 @@ impl TryFrom for AccessLog { }?; let logger = match fmt.flatten() { - Some(SubstitutionFormatString { format, omit_empty_values }) => orion_format::LogFormatter::try_new( - format.as_ref().map(AsRef::as_ref).unwrap_or(DEFAULT_ACCESS_LOG_FORMAT), - omit_empty_values, - ), + Some(SubstitutionFormatString { format, omit_empty_values }) => { + let format = format.as_ref().map(AsRef::as_ref).unwrap_or(DEFAULT_ACCESS_LOG_FORMAT); + debug!("Received formatter {format}"); + let format = DEFAULT_ACCESS_LOG_FORMAT; + orion_format::LogFormatter::try_new(format, omit_empty_values) + }, None => orion_format::LogFormatter::try_new(DEFAULT_ACCESS_LOG_FORMAT, false), } .map_err(|e| GenericError::from_msg(format!("Error: failed to create log formatter: {e}")))?; diff --git a/orion-configuration/src/config/network_filters/http_connection_manager.rs b/orion-configuration/src/config/network_filters/http_connection_manager.rs index cbf1512a..1983cf86 100644 --- a/orion-configuration/src/config/network_filters/http_connection_manager.rs +++ b/orion-configuration/src/config/network_filters/http_connection_manager.rs @@ -36,6 +36,7 @@ use 
std::{collections::HashMap, str::FromStr, time::Duration}; use crate::config::{ common::*, network_filters::{access_log::AccessLog, tracing::TracingConfig}, + ConfigSource, }; #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] @@ -339,6 +340,11 @@ pub enum RetryOn { RetriableStatusCodes, RetriableHeaders, Http3PostConnectFailure, + GRPCCancelled, + GRPCDeadlineExceeded, + GRPCInternal, + GRPCResourceExhausted, + GRPCUnavailable, } impl FromStr for RetryOn { @@ -349,7 +355,7 @@ impl FromStr for RetryOn { match s { "5xx" => Ok(RetryOn::Err5xx), "gateway-error" => Ok(RetryOn::GatewayError), - "reset" => Ok(RetryOn::Reset), + "reset" | "reset-before-request" => Ok(RetryOn::Reset), "connect-failure" => Ok(RetryOn::ConnectFailure), "envoy-ratelimited" => Ok(RetryOn::EnvoyRateLimited), "retriable-4xx" => Ok(RetryOn::Retriable4xx), @@ -357,6 +363,12 @@ impl FromStr for RetryOn { "retriable-status-codes" => Ok(RetryOn::RetriableStatusCodes), "retriable-headers" => Ok(RetryOn::RetriableHeaders), "http3-post-connect-failure" => Ok(RetryOn::Http3PostConnectFailure), + // https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#config-http-filters-router-x-envoy-retry-on + "cancelled" => Ok(RetryOn::GRPCCancelled), + "deadline-exceeded" => Ok(RetryOn::GRPCDeadlineExceeded), + "internal" => Ok(RetryOn::GRPCInternal), + "resource-exhausted" => Ok(RetryOn::GRPCResourceExhausted), + "unavailable" => Ok(RetryOn::GRPCUnavailable), s => Err(GenericError::from_msg(format!("Invalid RetryOn value \"{s}\""))), } } @@ -393,16 +405,6 @@ pub struct RdsSpecifier { pub config_source: ConfigSource, } -#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] -pub struct ConfigSource { - pub config_source_specifier: ConfigSourceSpecifier, -} - -#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] -pub enum ConfigSourceSpecifier { - ADS, -} - #[cfg(test)] mod tests { @@ -578,8 +580,8 @@ mod envoy_conversions { 
router::Router, FilterConfigOverride, FilterOverride, HttpFilter, HttpFilterType, SupportedEnvoyFilter, SupportedEnvoyHttpFilter, }, - CodecType, ConfigSource, ConfigSourceSpecifier, HttpConnectionManager, RdsSpecifier, RetryBackoff, RetryOn, - RetryPolicy, Route, RouteConfiguration, RouteSpecifier, UpgradeType, VirtualHost, XffSettings, + CodecType, HttpConnectionManager, RdsSpecifier, RetryBackoff, RetryOn, RetryPolicy, Route, RouteConfiguration, + RouteSpecifier, UpgradeType, VirtualHost, XffSettings, }; use crate::config::{ common::*, @@ -589,15 +591,9 @@ mod envoy_conversions { use compact_str::CompactString; use http::HeaderName; use orion_data_plane_api::envoy_data_plane_api::envoy::{ - config::{ - core::v3::{ - config_source::ConfigSourceSpecifier as EnvoyConfigSourceSpecifier, AggregatedConfigSource, - ConfigSource as EnvoyConfigSource, - }, - route::v3::{ - retry_policy::RetryBackOff as EnvoyRetryBackoff, RetryPolicy as EnvoyRetryPolicy, Route as EnvoyRoute, - RouteConfiguration as EnvoyRouteConfiguration, VirtualHost as EnvoyVirtualHost, - }, + config::route::v3::{ + retry_policy::RetryBackOff as EnvoyRetryBackoff, RetryPolicy as EnvoyRetryPolicy, Route as EnvoyRoute, + RouteConfiguration as EnvoyRouteConfiguration, VirtualHost as EnvoyVirtualHost, }, extensions::filters::network::http_connection_manager::v3::{ http_connection_manager::{CodecType as EnvoyCodecType, RouteSpecifier as EnvoyRouteSpecifier}, @@ -639,14 +635,14 @@ mod envoy_conversions { add_user_agent, tracing, common_http_protocol_options, - http_protocol_options, + http_protocol_options: _istio_ignore_1, http2_protocol_options, http3_protocol_options, - server_name, + server_name: _, server_header_transformation, scheme_header_transformation, max_request_headers_kb, - stream_idle_timeout, + stream_idle_timeout: _, request_timeout, request_headers_timeout, drain_timeout, @@ -665,15 +661,15 @@ mod envoy_conversions { generate_request_id, preserve_external_request_id, 
always_set_request_id_in_response, - forward_client_cert_details, - set_current_client_cert_details, - proxy_100_continue, + forward_client_cert_details: _, + set_current_client_cert_details: _, + proxy_100_continue: _, represent_ipv4_remote_address_as_ipv4_mapped_ipv6, upgrade_configs, - normalize_path, + normalize_path: _, merge_slashes, - path_with_escaped_slashes_action, - request_id_extension, + path_with_escaped_slashes_action: _, + request_id_extension: _, local_reply_config, strip_matching_host_port, stream_error_on_invalid_http_message, @@ -695,14 +691,14 @@ mod envoy_conversions { add_user_agent, // tracing, common_http_protocol_options, - http_protocol_options, + //http_protocol_options, http2_protocol_options, http3_protocol_options, - server_name, + //server_name, server_header_transformation, scheme_header_transformation, max_request_headers_kb, - stream_idle_timeout, + //stream_idle_timeout, // request_timeout, request_headers_timeout, drain_timeout, @@ -721,15 +717,15 @@ mod envoy_conversions { // generate_request_id, // preserve_external_request_id, // always_set_request_id_in_response, - forward_client_cert_details, - set_current_client_cert_details, - proxy_100_continue, + //forward_client_cert_details, + //set_current_client_cert_details, + //proxy_100_continue, represent_ipv4_remote_address_as_ipv4_mapped_ipv6, // upgrade_configs, - normalize_path, + //normalize_path, merge_slashes, - path_with_escaped_slashes_action, - request_id_extension, + //path_with_escaped_slashes_action, + //request_id_extension, local_reply_config, strip_matching_host_port, stream_error_on_invalid_http_message, @@ -894,11 +890,11 @@ mod envoy_conversions { request_headers_to_add, request_headers_to_remove, most_specific_header_mutations_wins, - validate_clusters, - max_direct_response_body_size_bytes, + validate_clusters: _, + max_direct_response_body_size_bytes: _istio_ignore_1, cluster_specifier_plugins, request_mirror_policies, - ignore_port_in_host_matching, + 
ignore_port_in_host_matching: _istio_ignore_2, ignore_path_parameters_in_path_matching, typed_per_filter_config, metadata, @@ -913,15 +909,16 @@ mod envoy_conversions { // request_headers_to_add, // request_headers_to_remove, // most_specific_header_mutations_wins, - validate_clusters, - max_direct_response_body_size_bytes, + //validate_clusters, + //max_direct_response_body_size_bytes, cluster_specifier_plugins, request_mirror_policies, - ignore_port_in_host_matching, + //ignore_port_in_host_matching, ignore_path_parameters_in_path_matching, typed_per_filter_config, metadata )?; + let name = if name.is_empty() { "route_configuration".to_owned() } else { name }; let name: CompactString = required!(name)?.into(); (|| -> Result<_, GenericError> { let response_headers_to_add = convert_vec!(response_headers_to_add)?; @@ -976,7 +973,7 @@ mod envoy_conversions { response_headers_to_remove, cors, typed_per_filter_config, - include_request_attempt_count, + include_request_attempt_count: _istio_ignored_1, include_attempt_count_in_response, retry_policy, retry_policy_typed_config, @@ -1001,7 +998,7 @@ mod envoy_conversions { // response_headers_to_remove, cors, typed_per_filter_config, - include_request_attempt_count, + //include_request_attempt_count, include_attempt_count_in_response, // retry_policy, retry_policy_typed_config, @@ -1061,9 +1058,9 @@ mod envoy_conversions { per_try_timeout, per_try_idle_timeout, retry_priority, - retry_host_predicate, + retry_host_predicate: _istio_ignored_1, retry_options_predicates, - host_selection_retry_max_attempts, + host_selection_retry_max_attempts: _istio_ignored_2, retriable_status_codes, retry_back_off, rate_limited_retry_back_off, @@ -1076,9 +1073,9 @@ mod envoy_conversions { // per_try_timeout, per_try_idle_timeout, retry_priority, - retry_host_predicate, + //retry_host_predicate, retry_options_predicates, - host_selection_retry_max_attempts, + //host_selection_retry_max_attempts, // retriable_status_codes, // retry_back_off, 
// retriable_headers, @@ -1150,8 +1147,8 @@ mod envoy_conversions { let EnvoyRoute { name, r#match, - metadata, - decorator, + metadata: _istio_ignore_1, + decorator: _, typed_per_filter_config, request_headers_to_add, request_headers_to_remove, @@ -1166,8 +1163,8 @@ mod envoy_conversions { unsupported_field!( //name, // r#match, - metadata, - decorator, + //metadata, + //decorator, // typed_per_filter_config, // request_headers_to_add, // request_headers_to_remove, @@ -1230,31 +1227,4 @@ mod envoy_conversions { Ok(Self { route_config_name, config_source }) } } - impl TryFrom for ConfigSource { - type Error = GenericError; - fn try_from(value: EnvoyConfigSource) -> Result { - let EnvoyConfigSource { authorities, initial_fetch_timeout, resource_api_version, config_source_specifier } = - value; - unsupported_field!(authorities, initial_fetch_timeout, resource_api_version)?; - let config_source_specifier = convert_opt!(config_source_specifier)?; - Ok(Self { config_source_specifier }) - } - } - - impl TryFrom for ConfigSourceSpecifier { - type Error = GenericError; - fn try_from(value: EnvoyConfigSourceSpecifier) -> Result { - match value { - EnvoyConfigSourceSpecifier::Ads(AggregatedConfigSource {}) => Ok(Self::ADS), - EnvoyConfigSourceSpecifier::ApiConfigSource(_) => { - Err(GenericError::unsupported_variant("ApiConfigSource")) - }, - EnvoyConfigSourceSpecifier::Path(_) => Err(GenericError::unsupported_variant("Path")), - EnvoyConfigSourceSpecifier::PathConfigSource(_) => { - Err(GenericError::unsupported_variant("PathConfigSource")) - }, - EnvoyConfigSourceSpecifier::Self_(_) => Err(GenericError::unsupported_variant("Self_")), - } - } - } } diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/http_filters.rs b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters.rs index 971aa9ef..f3e7cdde 100644 --- a/orion-configuration/src/config/network_filters/http_connection_manager/http_filters.rs +++ 
b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters.rs @@ -63,6 +63,7 @@ pub struct HttpFilter { pub enum HttpFilterType { Rbac(HttpRbac), RateLimit(LocalRateLimit), + Ingored, } #[cfg(feature = "envoy-conversions")] @@ -93,6 +94,7 @@ mod envoy_conversions { google::protobuf::Any, prost::Message, }; + use tracing::info; #[derive(Debug, Clone)] pub(crate) struct SupportedEnvoyHttpFilter { @@ -136,6 +138,7 @@ mod envoy_conversions { SupportedEnvoyFilter::Router(_) => { Err(GenericError::from_msg("router filter has to be the last filter in the chain")) }, + SupportedEnvoyFilter::Ignored => Ok(Self::Ingored), } } } @@ -146,6 +149,7 @@ mod envoy_conversions { LocalRateLimit(EnvoyLocalRateLimit), Rbac(EnvoyRbac), Router(EnvoyRouter), + Ignored, } impl TryFrom for SupportedEnvoyFilter { @@ -161,7 +165,22 @@ mod envoy_conversions { "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" => { EnvoyRouter::decode(typed_config.value.as_slice()).map(Self::Router) }, - _ => return Err(GenericError::unsupported_variant(typed_config.type_url)), + "type.googleapis.com/udpa.type.v1.TypedStruct" + | "type.googleapis.com/stats.PluginConfig" + | "type.googleapis.com/envoy.extensions.filters.http.grpc_stats.v3.FilterConfig" + | "type.googleapis.com/istio.envoy.config.filter.http.alpn.v2alpha1.FilterConfig" + | "type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault" + | "type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors" => { + info!("Ignored Istio type {}", typed_config.type_url); + Ok(SupportedEnvoyFilter::Ignored) + }, + + _ => { + return Err(GenericError::unsupported_variant(format!( + "HTTP filter unsupported variant {}", + typed_config.type_url + ))) + }, } .map_err(|e| { GenericError::from_msg_with_cause( @@ -189,7 +208,12 @@ mod envoy_conversions { "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute" => { EnvoyRbacPerRoute::decode(typed_config.value.as_slice()).map(Self::Rbac) }, - _ 
=> return Err(GenericError::unsupported_variant(typed_config.type_url)), + _ => { + return Err(GenericError::unsupported_variant(format!( + "HTTP Filter override unsupported variant {}", + typed_config.type_url + ))) + }, } .map_err(|e| { GenericError::from_msg_with_cause( diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/route.rs b/orion-configuration/src/config/network_filters/http_connection_manager/route.rs index 4a7fec82..3edf89ce 100644 --- a/orion-configuration/src/config/network_filters/http_connection_manager/route.rs +++ b/orion-configuration/src/config/network_filters/http_connection_manager/route.rs @@ -865,14 +865,14 @@ mod envoy_conversions { include_vh_rate_limits, hash_policy, cors, - max_grpc_timeout, + max_grpc_timeout: _istio_ignored_1, grpc_timeout_offset, upgrade_configs, internal_redirect_policy, internal_redirect_action, max_internal_redirects, hedge_policy, - max_stream_duration, + max_stream_duration: _, cluster_specifier, host_rewrite_specifier, } = value; @@ -894,14 +894,14 @@ mod envoy_conversions { include_vh_rate_limits, // hash_policy, cors, - max_grpc_timeout, + //max_grpc_timeout, grpc_timeout_offset, // upgrade_configs, internal_redirect_policy, internal_redirect_action, max_internal_redirects, hedge_policy, - max_stream_duration, + //max_stream_duration, // cluster_specifier, host_rewrite_specifier )?; diff --git a/orion-configuration/src/config/runtime.rs b/orion-configuration/src/config/runtime.rs index 092b3541..6d404822 100644 --- a/orion-configuration/src/config/runtime.rs +++ b/orion-configuration/src/config/runtime.rs @@ -163,8 +163,8 @@ fn parse_cgroup_v2_cpu_max(content: &str) -> crate::Result { fn get_cgroup_v1_cpu_limit() -> crate::Result { let cgroup_path = get_cgroup_v1_cpu_path()?; - let quota_path = format!("{}/cpu.cfs_quota_us", cgroup_path); - let period_path = format!("{}/cpu.cfs_period_us", cgroup_path); + let quota_path = format!("{cgroup_path}/cpu.cfs_quota_us"); + let 
period_path = format!("{cgroup_path}/cpu.cfs_period_us"); let quota_content = std::fs::read_to_string(&quota_path)?; let period_content = std::fs::read_to_string(&period_path)?; @@ -196,7 +196,7 @@ fn parse_cgroup_v1_cpu_path(cgroup_content: &str) -> crate::Result { let mut parts = line.split(':'); if let (Some(_), Some(controllers), Some(path)) = (parts.next(), parts.next(), parts.next()) { if controllers.split(',').any(|c| c == "cpu") { - return Some(format!("/sys/fs/cgroup/cpu{}", path)); + return Some(format!("/sys/fs/cgroup/cpu{path}")); } } None @@ -205,7 +205,7 @@ fn parse_cgroup_v1_cpu_path(cgroup_content: &str) -> crate::Result { if std::path::Path::new("/sys/fs/cgroup/cpu").exists() { - Ok("/sys/fs/cgroup/cpu".to_string()) + Ok("/sys/fs/cgroup/cpu".to_owned()) } else { Err("CPU cgroup path not found".into()) } @@ -321,7 +321,7 @@ mod tests { let system_cpus = num_cpus::get(); let detected_cpus = detect_available_cpus(); assert!(detected_cpus > 0); - println!("System CPUs: {}, Detected CPUs: {}", system_cpus, detected_cpus); + println!("System CPUs: {system_cpus}, Detected CPUs: {detected_cpus}"); } #[test] diff --git a/orion-configuration/src/config/secret.rs b/orion-configuration/src/config/secret.rs index 06bc85c5..40c6ebde 100644 --- a/orion-configuration/src/config/secret.rs +++ b/orion-configuration/src/config/secret.rs @@ -52,15 +52,12 @@ pub enum Type { } #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] -pub struct ValidationContext { - trusted_ca: DataSource, +pub enum ValidationContext { + TrustedCA(DataSource), + None } -impl ValidationContext { - pub fn trusted_ca(&self) -> &DataSource { - &self.trusted_ca - } -} + #[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct TlsCertificate { @@ -168,7 +165,7 @@ mod envoy_conversions { verify_certificate_spki, verify_certificate_hash, match_typed_subject_alt_names, - match_subject_alt_names, + match_subject_alt_names:_, require_signed_certificate_timestamp, crl, 
allow_expired_certificate, @@ -185,7 +182,7 @@ mod envoy_conversions { verify_certificate_spki, verify_certificate_hash, match_typed_subject_alt_names, - match_subject_alt_names, + //match_subject_alt_names, require_signed_certificate_timestamp, crl, allow_expired_certificate, @@ -195,8 +192,13 @@ mod envoy_conversions { max_verify_depth, system_root_certs )?; - let trusted_ca = convert_opt!(trusted_ca)?; - Ok(Self { trusted_ca }) + //let trusted_ca = convert_opt!(trusted_ca)?; + if let Some(trusted_ca) = trusted_ca{ + Ok(Self::TrustedCA(trusted_ca.try_into()?)) + }else{ + Ok(Self::None) + } + } } } diff --git a/orion-configuration/src/config/transport.rs b/orion-configuration/src/config/transport.rs index 67d50d95..5394d85b 100644 --- a/orion-configuration/src/config/transport.rs +++ b/orion-configuration/src/config/transport.rs @@ -19,7 +19,7 @@ // use super::secret::{TlsCertificate, ValidationContext}; -use crate::config::{cluster, common::*}; +use crate::config::{cluster, common::*, core::Address}; use base64::Engine as _; use compact_str::CompactString; use serde::{ @@ -32,12 +32,29 @@ use std::{ str::FromStr, }; +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Default)] +pub struct BindDeviceOptions { + pub bind_device: Option, + pub bind_address: Option, + pub bind_to_port: Option, +} + #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct BindDevice { /// A interface name as defined by linux `SO_BINDTODEVICE` interface: CString, } +#[derive(Clone, Debug, Eq, Serialize, Deserialize, PartialEq, Hash)] +pub struct BindAddress { + pub(crate) address: Address, +} +impl BindAddress { + pub fn address(&self) -> &Address { + &self.address + } +} + impl BindDevice { pub fn interface(&self) -> &CStr { &self.interface @@ -357,7 +374,7 @@ mod envoy_conversions { let EnvoyTlsParameters { tls_minimum_protocol_version, tls_maximum_protocol_version, - cipher_suites, + cipher_suites: _, ecdh_curves, signature_algorithms, compliance_policies, @@ -365,7 +382,7 
@@ mod envoy_conversions { unsupported_field!( // tls_minimum_protocol_version, // tls_maximum_protocol_version, - cipher_suites, + //cipher_suites, ecdh_curves, signature_algorithms, compliance_policies @@ -423,7 +440,7 @@ mod envoy_conversions { tls_certificate_provider_instance, tls_certificate_certificate_provider, tls_certificate_certificate_provider_instance, - alpn_protocols, + alpn_protocols: _, custom_handshaker, key_log, validation_context_type, @@ -436,7 +453,7 @@ mod envoy_conversions { tls_certificate_provider_instance, tls_certificate_certificate_provider, tls_certificate_certificate_provider_instance, - alpn_protocols, + //alpn_protocols, custom_handshaker, key_log, // validation_context_type custom_tls_certificate_selector @@ -461,9 +478,9 @@ mod envoy_conversions { impl TryFrom for SdsConfig { type Error = GenericError; fn try_from(value: EnvoySdsSecretConfig) -> Result { - let EnvoySdsSecretConfig { name, sds_config } = value; + let EnvoySdsSecretConfig { name, sds_config: _ } = value; let name: CompactString = required!(name)?.into(); - unsupported_field!(sds_config).with_name(name.clone())?; + //unsupported_field!(sds_config).with_name(name.clone())?; Ok(Self { name }) } } @@ -478,8 +495,16 @@ mod envoy_conversions { EnvoyValidationContextType::ValidationContextSdsSecretConfig(x) => { SdsConfig::try_from(x).map(|sds| Self::SdsConfig(sds.name)) }, - EnvoyValidationContextType::CombinedValidationContext(_) => { - Err(GenericError::unsupported_variant("CombinedValidationContext")) + EnvoyValidationContextType::CombinedValidationContext(combined_context) => { + if let Some(context) = combined_context.default_validation_context { + context.try_into().map(Self::ValidationContext) + } else if let Some(context) = combined_context.validation_context_sds_secret_config { + SdsConfig::try_from(context).map(|sds| Self::SdsConfig(sds.name)) + } else { + Err(GenericError::Message( + "CombinedValidationContext at least one validation method needs to be 
set".into(), + )) + } }, EnvoyValidationContextType::ValidationContextCertificateProvider(_) => { Err(GenericError::unsupported_variant("ValidationContextCertificateProvider")) diff --git a/orion-data-plane-api/src/xds/bindings.rs b/orion-data-plane-api/src/xds/bindings.rs deleted file mode 100644 index 55b5280f..00000000 --- a/orion-data-plane-api/src/xds/bindings.rs +++ /dev/null @@ -1,160 +0,0 @@ -// SPDX-FileCopyrightText: © 2025 kmesh authors -// SPDX-License-Identifier: Apache-2.0 -// -// Copyright 2025 kmesh authors -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// - -use std::{future::Future, pin::Pin}; - -use super::model; -use envoy_data_plane_api::envoy::service::{ - cluster::v3::cluster_discovery_service_client::ClusterDiscoveryServiceClient, - discovery::v3::{ - aggregated_discovery_service_client::AggregatedDiscoveryServiceClient, DeltaDiscoveryRequest, - DeltaDiscoveryResponse, - }, - endpoint::v3::endpoint_discovery_service_client::EndpointDiscoveryServiceClient, - listener::v3::listener_discovery_service_client::ListenerDiscoveryServiceClient, - route::v3::route_discovery_service_client::RouteDiscoveryServiceClient, - secret::v3::secret_discovery_service_client::SecretDiscoveryServiceClient, -}; -use model::TypeUrl; - -use envoy_data_plane_api::tonic; -use tokio_stream::Stream; -use tonic::transport::Channel; - -pub type DeltaFuture<'a> = Pin< - Box< - dyn Future< - Output = std::result::Result< - tonic::Response>, - tonic::Status, - >, - > + Send - + 'a, - >, ->; - -/// Abstracts over the variation in generated xDS clients -pub trait TypedXdsBinding { - fn type_url() -> Option; - fn delta_request(&mut self, request: impl Stream + Send + 'static) - -> DeltaFuture<'_>; -} - -/// Handle to ADS client -pub struct AggregatedDiscoveryType { - pub underlying_client: AggregatedDiscoveryServiceClient, -} - -impl TypedXdsBinding for AggregatedDiscoveryType { - fn type_url() -> Option { - None - } - fn delta_request( - &mut self, - request: impl Stream + Send + 'static, - ) -> DeltaFuture<'_> { - Box::pin(self.underlying_client.delta_aggregated_resources(request)) - } -} - -/// Handle to CDS client -pub struct ClusterDiscoveryType { - pub underlying_client: ClusterDiscoveryServiceClient, -} - -impl TypedXdsBinding for ClusterDiscoveryType { - fn type_url() -> Option { - Some(TypeUrl::Cluster) - } - fn delta_request( - &mut self, - request: impl Stream + Send + 'static, - ) -> DeltaFuture<'_> { - Box::pin(self.underlying_client.delta_clusters(request)) - } -} - -/// Handle to LDS Client -pub struct 
ListenerDiscoveryType { - pub underlying_client: ListenerDiscoveryServiceClient, -} - -impl TypedXdsBinding for ListenerDiscoveryType { - fn type_url() -> Option { - Some(TypeUrl::Listener) - } - fn delta_request( - &mut self, - request: impl Stream + Send + 'static, - ) -> DeltaFuture<'_> { - Box::pin(self.underlying_client.delta_listeners(request)) - } -} - -/// Handle to RDS Client -pub struct RouteDiscoveryType { - pub underlying_client: RouteDiscoveryServiceClient, -} - -impl TypedXdsBinding for RouteDiscoveryType { - fn type_url() -> Option { - Some(TypeUrl::RouteConfiguration) - } - fn delta_request( - &mut self, - request: impl Stream + Send + 'static, - ) -> DeltaFuture<'_> { - Box::pin(self.underlying_client.delta_routes(request)) - } -} - -/// Handle to EDS Client -pub struct EndpointDiscoveryType { - pub underlying_client: EndpointDiscoveryServiceClient, -} - -impl TypedXdsBinding for EndpointDiscoveryType { - fn type_url() -> Option { - Some(TypeUrl::ClusterLoadAssignment) - } - fn delta_request( - &mut self, - request: impl Stream + Send + 'static, - ) -> DeltaFuture<'_> { - Box::pin(self.underlying_client.delta_endpoints(request)) - } -} - -/// Handle to SDS Client -pub struct SecretsDiscoveryType { - pub underlying_client: SecretDiscoveryServiceClient, -} - -impl TypedXdsBinding for SecretsDiscoveryType { - fn type_url() -> Option { - Some(TypeUrl::Secret) - } - fn delta_request( - &mut self, - request: impl Stream + Send + 'static, - ) -> DeltaFuture<'_> { - Box::pin(self.underlying_client.delta_secrets(request)) - } -} diff --git a/orion-data-plane-api/src/xds/client.rs b/orion-data-plane-api/src/xds/client.rs deleted file mode 100644 index 52f41eda..00000000 --- a/orion-data-plane-api/src/xds/client.rs +++ /dev/null @@ -1,426 +0,0 @@ -// SPDX-FileCopyrightText: © 2025 kmesh authors -// SPDX-License-Identifier: Apache-2.0 -// -// Copyright 2025 kmesh authors -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may 
not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// - -use super::{ - bindings, - model::{RejectedConfig, ResourceId, ResourceVersion, TypeUrl, XdsError, XdsResourcePayload, XdsResourceUpdate}, -}; -use envoy_data_plane_api::{ - envoy::{ - config::core::v3::Node, - service::discovery::v3::{DeltaDiscoveryRequest, DeltaDiscoveryResponse}, - }, - google::rpc::Status, - tonic, -}; -use std::{ - collections::{HashMap, HashSet}, - time::Duration, -}; - -use tokio::{ - sync::{mpsc, oneshot}, - time, -}; -use tracing::{debug, info, warn}; - -pub struct DiscoveryClientBuilder { - node: Node, - client_binding: C, - initial_subscriptions: HashMap>, - error: Option, -} - -impl DiscoveryClientBuilder -where - C: bindings::TypedXdsBinding, -{ - pub fn new(node: Node, client: C) -> DiscoveryClientBuilder { - DiscoveryClientBuilder { node, client_binding: client, initial_subscriptions: HashMap::new(), error: None } - } - - pub fn subscribe_resource_name(mut self, resource_id: ResourceId) -> Self { - if let Some(type_url) = C::type_url() { - self = self.subscribe_resource_name_by_typeurl(resource_id, type_url); - } else { - self.error = Some("subscribe only works if typed binding provides a compatible type_url".to_owned()); - } - self - } - - pub fn subscribe_resource_name_by_typeurl(mut self, resource_id: ResourceId, type_url: TypeUrl) -> Self { - let configured_type_url = C::type_url(); - if configured_type_url.is_none() || configured_type_url.is_some_and(|type_is_set| type_is_set == type_url) { - 
self.initial_subscriptions.entry(type_url).or_default().insert(resource_id); - } else { - self.error = Some("can only subscribe by type_url when using a compatible typed binding".to_owned()); - } - self - } - - pub fn build(self) -> Result<(DeltaClientBackgroundWorker, DeltaDiscoveryClient), XdsError> { - if let Some(err) = self.error { - Err(XdsError::BuilderFailed(err)) - } else { - let (subscription_updates_tx, subscription_updates_rx) = mpsc::channel::(100); - let (resource_updates_tx, resource_updates_rx) = mpsc::channel::(100); - Ok(( - DeltaClientBackgroundWorker { - node: self.node, - client_binding: self.client_binding, - initial_subscriptions: self.initial_subscriptions, - subscriptions_rx: subscription_updates_rx, - resources_tx: resource_updates_tx, - }, - DeltaDiscoveryClient { subscriptions_tx: subscription_updates_tx, resources_rx: resource_updates_rx }, - )) - } - } -} - -/// Incremental Client that operates the delta version of the xDS protocol -/// use to consume xDS configuration updates asychronously, modify resource subscriptions -pub struct DeltaDiscoveryClient { - subscriptions_tx: mpsc::Sender, - resources_rx: mpsc::Receiver, -} - -impl DeltaDiscoveryClient { - pub async fn recv(&mut self) -> Option { - self.resources_rx.recv().await - } - - #[cfg(test)] - pub async fn try_recv(&mut self) -> Result { - self.resources_rx.try_recv() - } - - pub async fn subscribe(&self, resource_id: ResourceId, type_url: TypeUrl) -> anyhow::Result<()> { - Ok(self.subscriptions_tx.send(SubscriptionEvent::Subscribe(type_url, resource_id)).await?) - } - - pub async fn unsubscribe(&self, resource_id: ResourceId, type_url: TypeUrl) -> anyhow::Result<()> { - Ok(self.subscriptions_tx.send(SubscriptionEvent::Unsubscribe(type_url, resource_id)).await?) 
- } -} - -#[derive(Debug)] -pub struct XdsUpdateEvent { - pub updates: Vec, - pub ack_channel: oneshot::Sender>, -} - -#[derive(Clone, Debug)] -pub enum SubscriptionEvent { - Subscribe(TypeUrl, ResourceId), - Unsubscribe(TypeUrl, ResourceId), -} - -/// Background worker that handles interactions with remote xDS services -pub struct DeltaClientBackgroundWorker { - node: Node, - client_binding: C, - initial_subscriptions: HashMap>, - subscriptions_rx: mpsc::Receiver, - resources_tx: mpsc::Sender, -} - -impl DeltaClientBackgroundWorker { - pub async fn run(&mut self) -> anyhow::Result<()> { - let mut connection_id = 0; - - let mut state = DiscoveryClientState { - backoff: Duration::from_millis(50), - tracked: HashMap::new(), - subscriptions: self.initial_subscriptions.clone(), - }; - loop { - connection_id += 1; - debug!(connection_id, "starting xDS (re)connect cycle"); - self.persistently_connect(&mut state).await; - } - } -} - -#[derive(Debug)] -struct DiscoveryClientState { - backoff: Duration, - tracked: HashMap>, - subscriptions: HashMap>, -} - -impl DeltaClientBackgroundWorker { - async fn persistently_connect(&mut self, state: &mut DiscoveryClientState) { - const MAX_BACKOFF: Duration = Duration::from_secs(20); - let backoff = std::cmp::min(MAX_BACKOFF, state.backoff * 2); - let backoff_slowly = backoff + Duration::from_millis(50); - - match self.stream_resources(state).await { - Err(ref e @ XdsError::GrpcStatus(ref status)) => { - let err_detail = e.to_string(); - if status.code() == tonic::Code::Unknown - || status.code() == tonic::Code::Cancelled - || status.code() == tonic::Code::DeadlineExceeded - || status.code() == tonic::Code::Unavailable - { - warn!("xDS client terminated: {}, retrying in {:?}", err_detail, backoff); - } else { - warn!("xDS client interupted: {}, retrying in {:?}", err_detail, backoff); - } - tokio::time::sleep(backoff).await; - state.backoff = backoff; - }, - Err(e) => { - warn!("xDS client error: {}, retrying in {:?}", e, 
backoff_slowly); - tokio::time::sleep(backoff_slowly).await; - state.backoff = backoff_slowly; - }, - Ok(_) => { - warn!("xDS client closed"); - state.backoff = Duration::from_millis(50) - }, - } - } - - async fn stream_resources(&mut self, state: &mut DiscoveryClientState) -> anyhow::Result<(), XdsError> { - let (discovery_requests_tx, mut discovery_requests_rx) = mpsc::channel::(100); - - let resource_types = match C::type_url() { - Some(type_url) => vec![type_url], - _ => vec![ - TypeUrl::Secret, - TypeUrl::ClusterLoadAssignment, - TypeUrl::Cluster, - TypeUrl::RouteConfiguration, - TypeUrl::Listener, - ], - }; - let initial_requests: Vec = resource_types - .iter() - .map(|resource_type| { - let subscriptions = state.subscriptions.get(resource_type).cloned().unwrap_or_default(); - let already_tracked: HashMap = - state.tracked.get(resource_type).cloned().unwrap_or_default(); - DeltaDiscoveryRequest { - node: Some(self.node.clone()), - type_url: resource_type.to_string(), - initial_resource_versions: already_tracked, - resource_names_subscribe: subscriptions.into_iter().collect(), - ..Default::default() - } - }) - .collect(); - - let outbound_requests = async_stream::stream! { - for request in initial_requests { - yield request; - } - while let Some(message) = discovery_requests_rx.recv().await { - debug!( - type_url = message.type_url, - "sending discovery request" - ); - yield message - } - warn!("outbound discovery request stream has ended!"); - }; - - let mut response_stream = - self.client_binding.delta_request(outbound_requests).await.map_err(XdsError::GrpcStatus)?.into_inner(); - info!("xDS stream established"); - - loop { - tokio::select! 
{ - Some(event) = self.subscriptions_rx.recv() => { - match event { - SubscriptionEvent::Subscribe(type_url, resource_id) => { - debug!( - type_url=type_url.to_string(), - resource_id, - "processing new subscription" - ); - let is_new = state.subscriptions - .entry(type_url) - .or_default() - .insert(resource_id.clone()); - if is_new { - if let Err(err) = discovery_requests_tx.send(DeltaDiscoveryRequest { - node: Some(self.node.clone()), - type_url: type_url.to_string(), - resource_names_subscribe: vec![resource_id], - ..Default::default() - }) - .await { - warn!("problems updating subscription: {:?}", err); - } - } - } - SubscriptionEvent::Unsubscribe(type_url, resource_id) => { - debug!( - type_url=type_url.to_string(), - resource_id, - "processing unsubscribe" - ); - let was_subscribed = state.subscriptions - .entry(type_url) - .or_default() - .remove(resource_id.as_str()); - if was_subscribed { - if let Err(err) = discovery_requests_tx.send(DeltaDiscoveryRequest { - node: Some(self.node.clone()), - type_url: type_url.to_string(), - resource_names_unsubscribe: vec![resource_id], - ..Default::default() - }) - .await { - warn!("problems updating subscription: {:?}", err); - } - } - } - } - } - discovered = response_stream.message() => { - let payload = discovered?; - let discovery_response = payload.ok_or(XdsError::UnknownResourceType("empty payload received".to_owned()))?; - self.process_and_acknowledge(discovery_response, &discovery_requests_tx, state).await?; - } - } - } - } - - async fn process_and_acknowledge( - &mut self, - response: DeltaDiscoveryResponse, - acknowledgments_tx: &mpsc::Sender, - state: &mut DiscoveryClientState, - ) -> anyhow::Result<(), XdsError> { - let type_url = TypeUrl::try_from(response.type_url.as_ref())?; - let nonce = response.nonce.clone(); - info!(type_url = type_url.to_string(), size = response.resources.len(), "received config resources from xDS"); - - let for_removal: Vec = response - .removed_resources - .iter() - 
.map(|resource_id| { - debug!("received delete for config resource {}", resource_id); - if let Some(resources) = state.tracked.get_mut(&type_url) { - resources.remove(resource_id); - } - resource_id.clone() - }) - .collect(); - - let mut pending_update_versions = HashMap::::new(); - - let updates: Vec = response - .resources - .into_iter() - .filter_map(|resource| { - let resource_id = resource.name.to_string(); - let resource_version = resource.version.to_string(); - let decoded = XdsResourcePayload::try_from((resource, type_url)); - if decoded.is_err() { - warn!("problem decoding config update for {} : error {:?}", resource_id, decoded.as_ref().err()); - } else { - pending_update_versions.insert(resource_id.clone(), resource_version); - debug!("decoded config update for resource {resource_id}"); - } - decoded.ok().map(|value| XdsResourceUpdate::Update(resource_id.clone(), value)) - }) - .chain(for_removal.into_iter().map(|resource_id| XdsResourceUpdate::Remove(resource_id, type_url))) - .collect(); - - let (internal_ack_tx, internal_ack_rx) = oneshot::channel::>(); - let notification = XdsUpdateEvent { updates, ack_channel: internal_ack_tx }; - self.resources_tx - .send(notification) - .await - .map_err(|e: mpsc::error::SendError| XdsError::InternalProcessingError(e.to_string()))?; - - tokio::select! 
{ - ack = internal_ack_rx => { - match ack { - Ok(rejected_configs) => { - let error = if rejected_configs.is_empty() { - debug!( - type_url = type_url.to_string(), - nonce, - "sending ack response after processing", - ); - let tracked_resources = state.tracked.entry(type_url).or_default(); - for (resource_id, resource_version) in pending_update_versions.drain() { - tracked_resources.insert(resource_id, resource_version); - } - None - } else { - let error = rejected_configs - .into_iter() - .map(|reject| reject.to_string()) - .collect::>() - .join("; "); - debug!( - type_url = type_url.to_string(), - error, - nonce, - "rejecting configs with nack response", - ); - Some(Status { - message: error, - ..Default::default() - }) - }; - if let Err(err) = acknowledgments_tx.send(DeltaDiscoveryRequest { - type_url: type_url.to_string(), - response_nonce: nonce, - error_detail: error, - ..Default::default() - }) - .await - { - warn!("error in send xDS ack/nack upstream {:?}", err); - } - }, - Err(err) => { - warn!("error in reading internal ack/nack {:?}", err); - }, - } - } - _ = time::sleep(Duration::from_secs(5)) => { - warn!("timed out while waiting to acknowledge config updates"); - let error = pending_update_versions.into_keys() - .collect::>() - .join("; "); - let error = Status { - message: error, - ..Default::default() - }; - let _ = acknowledgments_tx.send(DeltaDiscoveryRequest { - type_url: type_url.to_string(), - response_nonce: nonce, - error_detail: Some(error), - ..Default::default() - }) - .await; - } - } - - Ok(()) - } -} diff --git a/orion-data-plane-api/src/xds/mod.rs b/orion-data-plane-api/src/xds/mod.rs index b9084049..cdefa217 100644 --- a/orion-data-plane-api/src/xds/mod.rs +++ b/orion-data-plane-api/src/xds/mod.rs @@ -18,6 +18,4 @@ // // -pub mod bindings; -pub mod client; pub mod model; diff --git a/orion-data-plane-api/tests/xds.rs b/orion-data-plane-api/tests/xds.rs index e1c4a33d..cc083eb1 100644 --- a/orion-data-plane-api/tests/xds.rs +++ 
b/orion-data-plane-api/tests/xds.rs @@ -1,481 +1,476 @@ -// SPDX-FileCopyrightText: © 2025 kmesh authors -// SPDX-License-Identifier: Apache-2.0 -// -// Copyright 2025 kmesh authors -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -use std::{ - pin::Pin, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, - time::Duration, -}; - -use hyper_util::rt::tokio::TokioIo; -use orion_data_plane_api::{ - envoy_data_plane_api::{ - envoy::{ - config::cluster::v3::Cluster, - service::{ - cluster::v3::{ - cluster_discovery_service_client::ClusterDiscoveryServiceClient, - cluster_discovery_service_server::{ClusterDiscoveryService, ClusterDiscoveryServiceServer}, - }, - discovery::v3::{ - aggregated_discovery_service_client::AggregatedDiscoveryServiceClient, - aggregated_discovery_service_server::{ - AggregatedDiscoveryService, AggregatedDiscoveryServiceServer, - }, - }, - }, - }, - tonic, - }, - xds::client::DiscoveryClientBuilder, -}; -use tonic::transport::Server; - -use orion_data_plane_api::xds::{ - bindings, - model::{TypeUrl, XdsResourceUpdate}, -}; - -use futures::Stream; -use orion_data_plane_api::envoy_data_plane_api::{ - envoy::{ - config::core::v3::Node, - service::discovery::v3::{DeltaDiscoveryResponse, DiscoveryResponse, Resource}, - }, - google::protobuf::Any, - prost::Message, -}; -use tokio::{ - sync::{mpsc, Mutex}, - time::{self, sleep}, -}; -use tokio_stream::wrappers::ReceiverStream; -use tonic::{transport::Uri, Response, Status}; -use 
tower::service_fn; -pub struct MockDiscoveryService { - relay: Arc>>>, -} - -#[tonic::async_trait] -impl AggregatedDiscoveryService for MockDiscoveryService { - type StreamAggregatedResourcesStream = Pin> + Send>>; - async fn stream_aggregated_resources( - &self, - _request: tonic::Request< - tonic::Streaming< - orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, - >, - >, - ) -> std::result::Result, tonic::Status> { - unimplemented!("not used by proxy"); - } - - type DeltaAggregatedResourcesStream = Pin> + Send>>; - async fn delta_aggregated_resources( - &self, - request: tonic::Request< - tonic::Streaming< - orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, - >, - >, - ) -> std::result::Result, tonic::Status> { - let mut in_stream = request.into_inner(); - let (tx, rx) = mpsc::channel::>(100); - let shared_receiver = self.relay.clone(); - tokio::spawn(async move { - let mut receiver = shared_receiver.lock().await; - 'outer: while let Ok(result) = in_stream.message().await { - match result { - Some(_) => { - while let Some(wrapped_response) = receiver.recv().await { - match tx.send(wrapped_response.clone()).await { - Ok(_) => { - if wrapped_response.is_err() { - break 'outer; - } - }, - _ => { - break 'outer; - }, - } - } - }, - _ => { - break; - }, - } - } - }); - let output_stream = ReceiverStream::new(rx); - Ok(Response::new(Box::pin(output_stream) as Self::DeltaAggregatedResourcesStream)) - } -} - -#[tonic::async_trait] -impl ClusterDiscoveryService for MockDiscoveryService { - type StreamClustersStream = Pin> + Send>>; - async fn stream_clusters( - &self, - _request: tonic::Request< - tonic::Streaming< - orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, - >, - >, - ) -> std::result::Result, tonic::Status> { - unimplemented!("not used by proxy"); - } - - type DeltaClustersStream = Pin> + Send>>; - async fn delta_clusters( - &self, 
- request: tonic::Request< - tonic::Streaming< - orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, - >, - >, - ) -> std::result::Result, tonic::Status> { - let mut in_stream = request.into_inner(); - let (tx, rx) = mpsc::channel::>(100); - let shared_receiver = self.relay.clone(); - tokio::spawn(async move { - let mut receiver = shared_receiver.lock().await; - 'outer: while let Ok(result) = in_stream.message().await { - match result { - Some(_) => { - while let Some(wrapped_response) = receiver.recv().await { - match tx.send(wrapped_response.clone()).await { - Ok(_) => { - if wrapped_response.is_err() { - break 'outer; - } - }, - _ => { - break 'outer; - }, - } - } - }, - _ => { - break; - }, - } - } - }); - let output_stream = ReceiverStream::new(rx); - Ok(Response::new(Box::pin(output_stream) as Self::DeltaClustersStream)) - } - - async fn fetch_clusters( - &self, - _request: tonic::Request< - orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - unimplemented!("not used by proxy"); - } -} - -#[tokio::test] -async fn test_client_operations() { - let node = Node { id: "node-id".to_owned(), cluster: "gw-cluster".to_owned(), ..Default::default() }; - let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; - let cluster_resource = Resource { - name: cluster.name.clone(), - version: "0.1".to_owned(), - resource: Some(Any { - type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), - value: cluster.encode_to_vec(), - }), - ..Default::default() - }; - let resources = vec![cluster_resource]; - - let initial_response: Result = Ok(DeltaDiscoveryResponse { - resources, - nonce: "abcd".to_owned(), - type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), - ..Default::default() - }); - - let (server_side_response_tx, server_side_response_rx) = - 
mpsc::channel::>(100); - - let (client, server) = tokio::io::duplex(1024); - let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; - tokio::spawn(async move { - Server::builder() - .add_service(ClusterDiscoveryServiceServer::new(cds_server)) - .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) - .await - }); - - let mut client = Some(client); - let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") - .expect("failed to init Endpoint") - .connect_with_connector(service_fn(move |_: Uri| { - let client = client.take(); - async move { - if let Some(client) = client { - Ok(TokioIo::new(client)) - } else { - Err(std::io::Error::other("client is already taken")) - } - } - })) - .await; - - let cds_client = ClusterDiscoveryServiceClient::new(channel.unwrap()); - let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; - - let (mut worker, mut client) = - DiscoveryClientBuilder::::new(node, typed_binding).build().unwrap(); - - tokio::spawn(async move { - let _status = worker.run().await; - }); - - let _status = server_side_response_tx.send(initial_response).await; - - let _ = client.subscribe("".to_owned(), TypeUrl::Cluster).await; - - tokio::select! 
{ - Some(captured_response) = client.recv() => { - match captured_response.updates.first() { - Some(XdsResourceUpdate::Update(name, _payload)) => { - assert_eq!(name, "cluster-a"); - let ack_result = captured_response.ack_channel.send(vec![]); - assert!(ack_result.is_ok(), "failed to acknowledge response"); - } - _ => panic!("failed to receive config update from xDS") - } - } - _ = time::sleep(Duration::from_secs(5)) => - panic!("timed out waiting for xds resource over update channel") - } -} - -#[tokio::test] -async fn test_client_resilience() { - let node = Node { id: "node-id".to_owned(), cluster: "gw-cluster".to_owned(), ..Default::default() }; - let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; - let cluster_resource = Resource { - name: cluster.name.clone(), - version: "0.1".to_owned(), - resource: Some(Any { - type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), - value: cluster.encode_to_vec(), - }), - ..Default::default() - }; - let resources = vec![cluster_resource]; - - let initial_response: Result = Ok(DeltaDiscoveryResponse { - resources, - nonce: "abcd".to_owned(), - type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), - ..Default::default() - }); - - let (server_side_response_tx, server_side_response_rx) = - mpsc::channel::>(100); - - let (client, server) = tokio::io::duplex(1024); - let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; - - tokio::spawn(async move { - Server::builder() - .add_service(ClusterDiscoveryServiceServer::new(cds_server)) - .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) - .await - }); - - let mut client = Some(client); - let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") - .expect("failed to init Endpoint") - .connect_with_connector_lazy(service_fn(move |_: Uri| { - let client = client.take(); - async move { - if let Some(client) = client { - 
Ok(TokioIo::new(client)) - } else { - Err(std::io::Error::other("client is already taken")) - } - } - })); - - let cds_client = ClusterDiscoveryServiceClient::new(channel); - let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; - - let (mut worker, mut client) = DiscoveryClientBuilder::::new(node, typed_binding) - .subscribe_resource_name("cluster-a".to_owned()) - .subscribe_resource_name("cluster-b".to_owned()) - .build() - .unwrap(); - - tokio::spawn(async move { - let _status = worker.run().await; - }); - let captured_count = AtomicUsize::new(0); - - let _status = server_side_response_tx.send(initial_response.clone()).await; - - tokio::select! { - Some(captured_response) = client.recv() => { - match captured_response.updates.first() { - Some(XdsResourceUpdate::Update(name, _payload)) => { - assert_eq!(name, "cluster-a"); - let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); - assert_eq!( - captured_count.load(Ordering::Relaxed), - 1, - "cluster-a should be captured just once after some time" - ); - } - _ => panic!("failed to receive config update from xDS") - } - } - _ = time::sleep(Duration::from_secs(3)) => - panic!("timed out waiting for xds resource over update channel") - } - - let abort_response: Result = - Err(tonic::Status::aborted("kill the stream for testing purposes")); - let _status = server_side_response_tx.send(abort_response).await; - sleep(Duration::from_millis(300)).await; - - let _status = server_side_response_tx.send(initial_response.clone()).await; - sleep(Duration::from_millis(300)).await; - - tokio::select! 
{ - Some(captured_response) = client.recv() => { - match captured_response.updates.first() { - Some(XdsResourceUpdate::Update(name, _payload)) => { - assert_eq!(name, "cluster-a"); - let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); - assert_eq!( - captured_count.load(Ordering::Relaxed), - 2, - "cluster-a should be captured again after reconnect" - ); - } - _ => panic!("failed to receive config update from xDS") - } - } - _ = time::sleep(Duration::from_secs(3)) => - panic!("timed out waiting for xds resource over update channel") - } -} - -#[tokio::test] -async fn test_aggregated_discovery() { - let node = Node { id: "node-id".to_owned(), cluster: "gw-cluster".to_owned(), ..Default::default() }; - let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; - let cluster_resource = Resource { - name: cluster.name.clone(), - version: "0.1".to_owned(), - resource: Some(Any { - type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), - value: cluster.encode_to_vec(), - }), - ..Default::default() - }; - let resources = vec![cluster_resource]; - - let initial_response: Result = Ok(DeltaDiscoveryResponse { - resources, - nonce: "abcd".to_owned(), - type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), - ..Default::default() - }); - - let (server_side_response_tx, server_side_response_rx) = - mpsc::channel::>(100); - - let (client, server) = tokio::io::duplex(1024); - let ads_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; - tokio::spawn(async move { - Server::builder() - .add_service(AggregatedDiscoveryServiceServer::new(ads_server)) - .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) - .await - }); - - let mut client = Some(client); - let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") - .expect("failed to init Endpoint") - .connect_with_connector(service_fn(move |_: Uri| { - let client = client.take(); - async move { - 
if let Some(client) = client { - Ok(TokioIo::new(client)) - } else { - Err(std::io::Error::other("client is already taken")) - } - } - })) - .await - .unwrap(); - - let ads_client = AggregatedDiscoveryServiceClient::new(channel.clone()); - let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; - - let client = DiscoveryClientBuilder::::new(node.clone(), typed_binding) - .subscribe_resource_name("my-cluster".to_owned()) - .build(); - assert!(client.is_err(), "cannot subscribe to resources without a type_url for ADS"); - - let ads_client = AggregatedDiscoveryServiceClient::new(channel); - let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; - - let (mut worker, mut client) = - DiscoveryClientBuilder::::new(node, typed_binding) - .subscribe_resource_name_by_typeurl("cluster-a".to_owned(), TypeUrl::Cluster) - .subscribe_resource_name_by_typeurl("cluster-z".to_owned(), TypeUrl::Cluster) - .subscribe_resource_name_by_typeurl("endpoints-a".to_owned(), TypeUrl::ClusterLoadAssignment) - .subscribe_resource_name_by_typeurl("secret-config-a".to_owned(), TypeUrl::Secret) - .build() - .unwrap(); - - tokio::spawn(async move { - let _status = worker.run().await; - }); - - let _status = server_side_response_tx.send(initial_response).await; - - let _ = client.subscribe("".to_owned(), TypeUrl::Cluster).await; - - tokio::select! 
{ - Some(captured_response) = client.recv() => { - match captured_response.updates.first() { - Some(XdsResourceUpdate::Update(name, _payload)) => { - assert_eq!(name, "cluster-a"); - } - _ => panic!("failed to receive config update from xDS") - } - } - _ = time::sleep(Duration::from_secs(5)) => - panic!("timed out waiting for xds resource over update channel") - } -} +// // SPDX-FileCopyrightText: © 2025 kmesh authors +// // SPDX-License-Identifier: Apache-2.0 +// // +// // Copyright 2025 kmesh authors +// // +// // +// // Licensed under the Apache License, Version 2.0 (the "License"); +// // you may not use this file except in compliance with the License. +// // You may obtain a copy of the License at +// // +// // http://www.apache.org/licenses/LICENSE-2.0 +// // +// // Unless required by applicable law or agreed to in writing, software +// // distributed under the License is distributed on an "AS IS" BASIS, +// // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// // See the License for the specific language governing permissions and +// // limitations under the License. 
+// // + +// use std::{ +// pin::Pin, +// sync::{ +// atomic::{AtomicUsize, Ordering}, +// Arc, +// }, +// time::Duration, +// }; + +// use hyper_util::rt::tokio::TokioIo; +// use orion_data_plane_api::envoy_data_plane_api::{ +// envoy::{ +// config::cluster::v3::Cluster, +// service::{ +// cluster::v3::{ +// cluster_discovery_service_client::ClusterDiscoveryServiceClient, +// cluster_discovery_service_server::{ClusterDiscoveryService, ClusterDiscoveryServiceServer}, +// }, +// discovery::v3::{ +// aggregated_discovery_service_client::AggregatedDiscoveryServiceClient, +// aggregated_discovery_service_server::{AggregatedDiscoveryService, AggregatedDiscoveryServiceServer}, +// }, +// }, +// }, +// tonic, +// }; +// use tonic::transport::Server; + +// use orion_data_plane_api::xds::{ +// bindings, +// model::{TypeUrl, XdsResourceUpdate}, +// }; + +// use futures::Stream; +// use orion_data_plane_api::envoy_data_plane_api::{ +// envoy::{ +// config::core::v3::Node, +// service::discovery::v3::{DeltaDiscoveryResponse, DiscoveryResponse, Resource}, +// }, +// google::protobuf::Any, +// prost::Message, +// }; +// use tokio::{ +// sync::{mpsc, Mutex}, +// time::{self, sleep}, +// }; +// use tokio_stream::wrappers::ReceiverStream; +// use tonic::{transport::Uri, Response, Status}; +// use tower::service_fn; +// pub struct MockDiscoveryService { +// relay: Arc>>>, +// } + +// #[tonic::async_trait] +// impl AggregatedDiscoveryService for MockDiscoveryService { +// type StreamAggregatedResourcesStream = Pin> + Send>>; +// async fn stream_aggregated_resources( +// &self, +// _request: tonic::Request< +// tonic::Streaming< +// orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, +// >, +// >, +// ) -> std::result::Result, tonic::Status> { +// unimplemented!("not used by proxy"); +// } + +// type DeltaAggregatedResourcesStream = Pin> + Send>>; +// async fn delta_aggregated_resources( +// &self, +// request: tonic::Request< +// 
tonic::Streaming< +// orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, +// >, +// >, +// ) -> std::result::Result, tonic::Status> { +// let mut in_stream = request.into_inner(); +// let (tx, rx) = mpsc::channel::>(100); +// let shared_receiver = self.relay.clone(); +// tokio::spawn(async move { +// let mut receiver = shared_receiver.lock().await; +// 'outer: while let Ok(result) = in_stream.message().await { +// match result { +// Some(_) => { +// while let Some(wrapped_response) = receiver.recv().await { +// match tx.send(wrapped_response.clone()).await { +// Ok(_) => { +// if wrapped_response.is_err() { +// break 'outer; +// } +// }, +// _ => { +// break 'outer; +// }, +// } +// } +// }, +// _ => { +// break; +// }, +// } +// } +// }); +// let output_stream = ReceiverStream::new(rx); +// Ok(Response::new(Box::pin(output_stream) as Self::DeltaAggregatedResourcesStream)) +// } +// } + +// #[tonic::async_trait] +// impl ClusterDiscoveryService for MockDiscoveryService { +// type StreamClustersStream = Pin> + Send>>; +// async fn stream_clusters( +// &self, +// _request: tonic::Request< +// tonic::Streaming< +// orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, +// >, +// >, +// ) -> std::result::Result, tonic::Status> { +// unimplemented!("not used by proxy"); +// } + +// type DeltaClustersStream = Pin> + Send>>; +// async fn delta_clusters( +// &self, +// request: tonic::Request< +// tonic::Streaming< +// orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, +// >, +// >, +// ) -> std::result::Result, tonic::Status> { +// let mut in_stream = request.into_inner(); +// let (tx, rx) = mpsc::channel::>(100); +// let shared_receiver = self.relay.clone(); +// tokio::spawn(async move { +// let mut receiver = shared_receiver.lock().await; +// 'outer: while let Ok(result) = in_stream.message().await { +// match result { +// Some(_) => { +// 
while let Some(wrapped_response) = receiver.recv().await { +// match tx.send(wrapped_response.clone()).await { +// Ok(_) => { +// if wrapped_response.is_err() { +// break 'outer; +// } +// }, +// _ => { +// break 'outer; +// }, +// } +// } +// }, +// _ => { +// break; +// }, +// } +// } +// }); +// let output_stream = ReceiverStream::new(rx); +// Ok(Response::new(Box::pin(output_stream) as Self::DeltaClustersStream)) +// } + +// async fn fetch_clusters( +// &self, +// _request: tonic::Request< +// orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, +// >, +// ) -> std::result::Result< +// tonic::Response, +// tonic::Status, +// > { +// unimplemented!("not used by proxy"); +// } +// } + +// #[tokio::test] +// async fn test_client_operations() { +// let node = Node { id: "node-id".to_owned(), cluster: "gw-cluster".to_owned(), ..Default::default() }; +// let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; +// let cluster_resource = Resource { +// name: cluster.name.clone(), +// version: "0.1".to_owned(), +// resource: Some(Any { +// type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), +// value: cluster.encode_to_vec(), +// }), +// ..Default::default() +// }; +// let resources = vec![cluster_resource]; + +// let initial_response: Result = Ok(DeltaDiscoveryResponse { +// resources, +// nonce: "abcd".to_owned(), +// type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), +// ..Default::default() +// }); + +// let (server_side_response_tx, server_side_response_rx) = +// mpsc::channel::>(100); + +// let (client, server) = tokio::io::duplex(1024); +// let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; +// tokio::spawn(async move { +// Server::builder() +// .add_service(ClusterDiscoveryServiceServer::new(cds_server)) +// .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) +// .await +// }); + +// let 
mut client = Some(client); +// let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") +// .expect("failed to init Endpoint") +// .connect_with_connector(service_fn(move |_: Uri| { +// let client = client.take(); +// async move { +// if let Some(client) = client { +// Ok(TokioIo::new(client)) +// } else { +// Err(std::io::Error::other("client is already taken")) +// } +// } +// })) +// .await; + +// let cds_client = ClusterDiscoveryServiceClient::new(channel.unwrap()); +// let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; + +// let (mut worker, mut client) = +// DiscoveryClientBuilder::::new(node, typed_binding).build().unwrap(); + +// tokio::spawn(async move { +// let _status = worker.run().await; +// }); + +// let _status = server_side_response_tx.send(initial_response).await; + +// let _ = client.subscribe("".to_owned(), TypeUrl::Cluster).await; + +// tokio::select! { +// Some(captured_response) = client.recv() => { +// match captured_response.updates.first() { +// Some(XdsResourceUpdate::Update(name, _payload)) => { +// assert_eq!(name, "cluster-a"); +// let ack_result = captured_response.ack_channel.send(vec![]); +// assert!(ack_result.is_ok(), "failed to acknowledge response"); +// } +// _ => panic!("failed to receive config update from xDS") +// } +// } +// _ = time::sleep(Duration::from_secs(5)) => +// panic!("timed out waiting for xds resource over update channel") +// } +// } + +// #[tokio::test] +// async fn test_client_resilience() { +// let node = Node { id: "node-id".to_owned(), cluster: "gw-cluster".to_owned(), ..Default::default() }; +// let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; +// let cluster_resource = Resource { +// name: cluster.name.clone(), +// version: "0.1".to_owned(), +// resource: Some(Any { +// type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), +// value: cluster.encode_to_vec(), +// }), +// ..Default::default() +// }; +// let 
resources = vec![cluster_resource]; + +// let initial_response: Result = Ok(DeltaDiscoveryResponse { +// resources, +// nonce: "abcd".to_owned(), +// type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), +// ..Default::default() +// }); + +// let (server_side_response_tx, server_side_response_rx) = +// mpsc::channel::>(100); + +// let (client, server) = tokio::io::duplex(1024); +// let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; + +// tokio::spawn(async move { +// Server::builder() +// .add_service(ClusterDiscoveryServiceServer::new(cds_server)) +// .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) +// .await +// }); + +// let mut client = Some(client); +// let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") +// .expect("failed to init Endpoint") +// .connect_with_connector_lazy(service_fn(move |_: Uri| { +// let client = client.take(); +// async move { +// if let Some(client) = client { +// Ok(TokioIo::new(client)) +// } else { +// Err(std::io::Error::other("client is already taken")) +// } +// } +// })); + +// let cds_client = ClusterDiscoveryServiceClient::new(channel); +// let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; + +// let (mut worker, mut client) = DiscoveryClientBuilder::::new(node, typed_binding) +// .subscribe_resource_name("cluster-a".to_owned()) +// .subscribe_resource_name("cluster-b".to_owned()) +// .build() +// .unwrap(); + +// tokio::spawn(async move { +// let _status = worker.run().await; +// }); +// let captured_count = AtomicUsize::new(0); + +// let _status = server_side_response_tx.send(initial_response.clone()).await; + +// tokio::select! 
{ +// Some(captured_response) = client.recv() => { +// match captured_response.updates.first() { +// Some(XdsResourceUpdate::Update(name, _payload)) => { +// assert_eq!(name, "cluster-a"); +// let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); +// assert_eq!( +// captured_count.load(Ordering::Relaxed), +// 1, +// "cluster-a should be captured just once after some time" +// ); +// } +// _ => panic!("failed to receive config update from xDS") +// } +// } +// _ = time::sleep(Duration::from_secs(3)) => +// panic!("timed out waiting for xds resource over update channel") +// } + +// let abort_response: Result = +// Err(tonic::Status::aborted("kill the stream for testing purposes")); +// let _status = server_side_response_tx.send(abort_response).await; +// sleep(Duration::from_millis(300)).await; + +// let _status = server_side_response_tx.send(initial_response.clone()).await; +// sleep(Duration::from_millis(300)).await; + +// tokio::select! { +// Some(captured_response) = client.recv() => { +// match captured_response.updates.first() { +// Some(XdsResourceUpdate::Update(name, _payload)) => { +// assert_eq!(name, "cluster-a"); +// let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); +// assert_eq!( +// captured_count.load(Ordering::Relaxed), +// 2, +// "cluster-a should be captured again after reconnect" +// ); +// } +// _ => panic!("failed to receive config update from xDS") +// } +// } +// _ = time::sleep(Duration::from_secs(3)) => +// panic!("timed out waiting for xds resource over update channel") +// } +// } + +// #[tokio::test] +// async fn test_aggregated_discovery() { +// let node = Node { id: "node-id".to_owned(), cluster: "gw-cluster".to_owned(), ..Default::default() }; +// let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; +// let cluster_resource = Resource { +// name: cluster.name.clone(), +// version: "0.1".to_owned(), +// resource: Some(Any { +// type_url: 
"type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), +// value: cluster.encode_to_vec(), +// }), +// ..Default::default() +// }; +// let resources = vec![cluster_resource]; + +// let initial_response: Result = Ok(DeltaDiscoveryResponse { +// resources, +// nonce: "abcd".to_owned(), +// type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), +// ..Default::default() +// }); + +// let (server_side_response_tx, server_side_response_rx) = +// mpsc::channel::>(100); + +// let (client, server) = tokio::io::duplex(1024); +// let ads_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; +// tokio::spawn(async move { +// Server::builder() +// .add_service(AggregatedDiscoveryServiceServer::new(ads_server)) +// .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) +// .await +// }); + +// let mut client = Some(client); +// let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") +// .expect("failed to init Endpoint") +// .connect_with_connector(service_fn(move |_: Uri| { +// let client = client.take(); +// async move { +// if let Some(client) = client { +// Ok(TokioIo::new(client)) +// } else { +// Err(std::io::Error::other("client is already taken")) +// } +// } +// })) +// .await +// .unwrap(); + +// let ads_client = AggregatedDiscoveryServiceClient::new(channel.clone()); +// let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; + +// let client = DiscoveryClientBuilder::::new(node.clone(), typed_binding) +// .subscribe_resource_name("my-cluster".to_owned()) +// .build(); +// assert!(client.is_err(), "cannot subscribe to resources without a type_url for ADS"); + +// let ads_client = AggregatedDiscoveryServiceClient::new(channel); +// let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; + +// let (mut worker, mut client) = +// DiscoveryClientBuilder::::new(node, typed_binding) +// 
.subscribe_resource_name_by_typeurl("cluster-a".to_owned(), TypeUrl::Cluster) +// .subscribe_resource_name_by_typeurl("cluster-z".to_owned(), TypeUrl::Cluster) +// .subscribe_resource_name_by_typeurl("endpoints-a".to_owned(), TypeUrl::ClusterLoadAssignment) +// .subscribe_resource_name_by_typeurl("secret-config-a".to_owned(), TypeUrl::Secret) +// .build() +// .unwrap(); + +// tokio::spawn(async move { +// let _status = worker.run().await; +// }); + +// let _status = server_side_response_tx.send(initial_response).await; + +// let _ = client.subscribe("".to_owned(), TypeUrl::Cluster).await; + +// tokio::select! { +// Some(captured_response) = client.recv() => { +// match captured_response.updates.first() { +// Some(XdsResourceUpdate::Update(name, _payload)) => { +// assert_eq!(name, "cluster-a"); +// } +// _ => panic!("failed to receive config update from xDS") +// } +// } +// _ = time::sleep(Duration::from_secs(5)) => +// panic!("timed out waiting for xds resource over update channel") +// } +// } diff --git a/orion-format/src/grammar.rs b/orion-format/src/grammar.rs index f9c9fda2..acb3dea9 100644 --- a/orion-format/src/grammar.rs +++ b/orion-format/src/grammar.rs @@ -386,7 +386,7 @@ impl Grammar for EnvoyGrammar { // Unit tests module #[cfg(test)] mod tests { - use crate::DEFAULT_ACCESS_LOG_FORMAT; + use crate::{DEFAULT_ACCESS_LOG_FORMAT, DEFAULT_ISTIO_2_ACCESS_LOG_FORMAT, DEFAULT_ISTIO_ACCESS_LOG_FORMAT}; use super::*; @@ -501,6 +501,12 @@ mod tests { _ = EnvoyGrammar::parse(DEFAULT_ACCESS_LOG_FORMAT).unwrap(); } + #[test] + fn test_istio_fmt() { + _ = EnvoyGrammar::parse(DEFAULT_ISTIO_ACCESS_LOG_FORMAT).unwrap(); + _ = EnvoyGrammar::parse(DEFAULT_ISTIO_2_ACCESS_LOG_FORMAT).unwrap(); + } + // bad patters.. 
#[test] diff --git a/orion-format/src/lib.rs b/orion-format/src/lib.rs index 923f8dea..b92457cd 100644 --- a/orion-format/src/lib.rs +++ b/orion-format/src/lib.rs @@ -39,6 +39,12 @@ use thread_local::ThreadLocal; pub const DEFAULT_ACCESS_LOG_FORMAT: &str = r#"[%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%" "#; +pub const DEFAULT_ISTIO_ACCESS_LOG_FORMAT: &str = r#"[%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" %RESPONSE_CODE% %RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS% %CONNECTION_TERMINATION_DETAILS% "%UPSTREAM_TRANSPORT_FAILURE_REASON%" %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%" %UPSTREAM_CLUSTER_RAW% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME% %ROUTE_NAME%" +"#; + +pub const DEFAULT_ISTIO_2_ACCESS_LOG_FORMAT: &str = r#"[%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" %RESPONSE_CODE% %RESPONSE_FLAGS% %RESPONSE_CODE_DETAILS% %CONNECTION_TERMINATION_DETAILS% "%UPSTREAM_TRANSPORT_FAILURE_REASON%" %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%" %UPSTREAM_CLUSTER_RAW% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME% %ROUTE_NAME% +"#; + #[derive(Error, Debug, Eq, PartialEq)] pub enum FormatError { #[error("invalid operator `{0}`")] diff --git a/orion-lib/Cargo.toml b/orion-lib/Cargo.toml index 32716d26..7df66ff2 100644 --- a/orion-lib/Cargo.toml +++ b/orion-lib/Cargo.toml @@ -1,9 +1,9 @@ 
[package] -edition.workspace = true +edition.workspace = true license-file.workspace = true -name = "orion-lib" +name = "orion-lib" rust-version.workspace = true -version.workspace = true +version.workspace = true [dependencies] abort-on-drop.workspace = true @@ -25,6 +25,7 @@ http-body.workspace = true http-body-util.workspace = true hyper = { version = "1", features = ["full"] } hyper-rustls = { version = "0.27.1", features = ["default", "http2"] } +if-addrs = "0.14" ipnet = "2.9" once_cell = { version = "1.19" } opentelemetry = "0.29.0" @@ -52,6 +53,7 @@ rustls-pemfile = "2.1" rustls-platform-verifier = { version = "0.3" } rustls-webpki = "0.102" scopeguard = "1.2.0" +socket2 = "0.6" serde.workspace = true smol_str = "0.3.2" thiserror.workspace = true @@ -70,18 +72,19 @@ typed-builder = "0.18.2" url.workspace = true uuid = { version = "1.17.0", features = ["v4"] } x509-parser = { version = "0.17", features = ["default"] } +hyperlocal = "0.9.1" [dev-dependencies] orion-data-plane-api.workspace = true -serde_yaml.workspace = true -tracing-test.workspace = true +serde_yaml.workspace = true +tracing-test.workspace = true [features] # making sure we are not using pure ring aws_lc_rs = ["rustls/aws_lc_rs"] -default = ["aws_lc_rs"] -metrics = ["orion-metrics/metrics"] +default = ["aws_lc_rs"] +metrics = ["orion-metrics/metrics"] [lints] workspace = true diff --git a/orion-lib/src/clusters/balancers/default_balancer.rs b/orion-lib/src/clusters/balancers/default_balancer.rs index b31a8336..663e506f 100644 --- a/orion-lib/src/clusters/balancers/default_balancer.rs +++ b/orion-lib/src/clusters/balancers/default_balancer.rs @@ -152,7 +152,7 @@ where #[cfg(test)] mod test { - use orion_configuration::config::cluster::HttpProtocolOptions; + use orion_configuration::config::{cluster::HttpProtocolOptions, core::envoy_conversions::Address, transport::BindDeviceOptions}; use std::sync::Arc; use super::DefaultBalancer; @@ -176,7 +176,15 @@ mod test { if health_status == 
HealthStatus::Healthy { healthy += 1; } - lb_endpoints.push(Arc::new(LbEndpoint::new(auth, "test_cluster", None, weight, health_status))); + let address = Address::Socket(auth.host().to_owned(), auth.port_u16().unwrap()); + lb_endpoints.push(Arc::new(LbEndpoint::new( + auth, + address, + "test_cluster", + BindDeviceOptions::default(), + weight, + health_status, + ))); } loc_lb_endpoints.push(LocalityLbEndpoints { diff --git a/orion-lib/src/clusters/balancers/priority.rs b/orion-lib/src/clusters/balancers/priority.rs index 2d8e4011..29daa989 100644 --- a/orion-lib/src/clusters/balancers/priority.rs +++ b/orion-lib/src/clusters/balancers/priority.rs @@ -50,6 +50,9 @@ impl Priority { pub fn calculate_priority_loads(endpoints: &HashMap>) -> Vec<(u32, u32)> { let mut priority_health = vec![]; let mut sorted_endpoints = endpoints.iter().collect::>(); + if sorted_endpoints.is_empty() { + return vec![]; + } sorted_endpoints.sort_by(|a, b| a.0.cmp(b.0)); for (k, v) in &sorted_endpoints { priority_health.push((*k, Self::calculate_priority_health(v.healthy, v.total))); diff --git a/orion-lib/src/clusters/cluster.rs b/orion-lib/src/clusters/cluster.rs index fd9336df..9cc61632 100644 --- a/orion-lib/src/clusters/cluster.rs +++ b/orion-lib/src/clusters/cluster.rs @@ -29,7 +29,7 @@ use crate::clusters::clusters_manager::{RoutingContext, RoutingRequirement}; use orion_configuration::config::cluster::{ Cluster as ClusterConfig, ClusterDiscoveryType, ClusterLoadAssignment as ClusterLoadAssignmentConfig, HealthCheck, }; -use tracing::debug; +use tracing::{debug, warn}; use webpki::types::ServerName; use super::health::HealthStatus; @@ -51,7 +51,7 @@ impl TryFrom<(ClusterConfig, &SecretManager)> for PartialClusterType { let (cluster, secrets) = value; let config = cluster.clone(); let transport_socket_config = cluster.transport_socket; - let bind_device = cluster.bind_device; + let bind_device_options = cluster.bind_device_options; let load_balancing_policy = 
cluster.load_balancing_policy; let protocol_options = cluster.http_protocol_options; @@ -65,11 +65,14 @@ impl TryFrom<(ClusterConfig, &SecretManager)> for PartialClusterType { .tls_configurator() .map(|tls_configurator| ServerName::try_from(tls_configurator.sni())) .transpose()?; + let cluster_name = cla.cluster_name.clone(); + let pcla = PartialClusterLoadAssignment::try_from(cla) + .map_err(|e| Error::from(format!("Unable to create cluster load assignment {cluster_name} {e}")))?; let cla = ClusterLoadAssignmentBuilder::builder() - .with_cla(PartialClusterLoadAssignment::try_from(cla)?) + .with_cla(pcla) .with_cluster_name(cluster.name.to_static_str()) - .with_bind_device(bind_device) + .with_bind_device_options(bind_device_options) .with_lb_policy(load_balancing_policy) .with_connection_timeout(cluster.connect_timeout) .with_transport_socket(transport_socket.clone()) @@ -96,7 +99,7 @@ impl TryFrom<(ClusterConfig, &SecretManager)> for PartialClusterType { let cla = ClusterLoadAssignmentBuilder::builder() .with_cla(PartialClusterLoadAssignment::try_from(cla)?) 
.with_cluster_name(cluster.name.to_static_str()) - .with_bind_device(bind_device) + .with_bind_device_options(bind_device_options) .with_lb_policy(load_balancing_policy) .with_connection_timeout(cluster.connect_timeout) .with_transport_socket(transport_socket.clone()) @@ -113,16 +116,24 @@ impl TryFrom<(ClusterConfig, &SecretManager)> for PartialClusterType { })) }, - ClusterDiscoveryType::Eds(None) => Ok(PartialClusterType::Dynamic(DynamicClusterBuilder { - name: cluster.name.to_static_str(), - bind_device, - transport_socket, - health_check, - load_balancing_policy, - config, - })), - ClusterDiscoveryType::Eds(Some(_)) => { - Err("EDS clusters can't have a static cluster load assignment configured".into()) + // ClusterDiscoveryType::Eds(None, None) => Ok(PartialClusterType::Dynamic(DynamicClusterBuilder { + // name: cluster.name.to_static_str(), + // bind_device, + // transport_socket, + // health_check, + // load_balancing_policy, + // config, + // })), + ClusterDiscoveryType::Eds(cla, eds) => { + warn!("Creating EDS cluster and skippint static endpoints {cla:?} {eds:?}"); + Ok(PartialClusterType::Dynamic(DynamicClusterBuilder { + name: cluster.name.to_static_str(), + bind_device_options, + transport_socket, + health_check, + load_balancing_policy, + config, + })) }, ClusterDiscoveryType::OriginalDst(_) => { let server_name = transport_socket @@ -133,7 +144,7 @@ impl TryFrom<(ClusterConfig, &SecretManager)> for PartialClusterType { Ok(PartialClusterType::OnDemand(OriginalDstClusterBuilder { name: cluster.name.to_static_str(), - bind_device, + bind_device_options, transport_socket, connect_timeout: cluster.connect_timeout, server_name, @@ -175,7 +186,7 @@ impl TryFrom<&ClusterType> for ClusterConfig { ClusterType::Dynamic(dynamic_cluster) => { let cla: ClusterLoadAssignmentConfig = dynamic_cluster.try_into()?; let mut config = dynamic_cluster.config.clone(); - config.discovery_settings = ClusterDiscoveryType::Eds(Some(cla)); + config.discovery_settings = 
ClusterDiscoveryType::Eds(Some(cla), None); Ok(config) }, ClusterType::OnDemand(original_dst_cluster) => Ok(original_dst_cluster.config.clone()), @@ -231,8 +242,8 @@ mod tests { let cla = match c { ClusterType::Static(s) => Some(&s.load_assignment), - ClusterType::Dynamic(d) => { - assert_eq!(&d.bind_device, &expected_bind_device); + ClusterType::Dynamic(d) => { + assert_eq!(d.bind_device_options.bind_device, expected_bind_device); d.load_assignment.as_ref() }, ClusterType::OnDemand(_) => unreachable!("OnDemand cluster has no load assignment"), @@ -241,7 +252,7 @@ mod tests { if let Some(load_assignment) = cla { for lep in &load_assignment.endpoints { for ep in &lep.endpoints { - assert_eq!(ep.bind_device, expected_bind_device); + assert_eq!(ep.bind_device_options.bind_device, expected_bind_device); } } } diff --git a/orion-lib/src/clusters/cluster/dynamic.rs b/orion-lib/src/clusters/cluster/dynamic.rs index 2784e7b8..e4fa15ed 100644 --- a/orion-lib/src/clusters/cluster/dynamic.rs +++ b/orion-lib/src/clusters/cluster/dynamic.rs @@ -26,7 +26,7 @@ use orion_configuration::config::{ LbEndpoint as LbEndpointConfig, LbPolicy, LocalityLbEndpoints as LocalityLbEndpointsConfig, }, core::envoy_conversions::Address, - transport::BindDevice, + transport::{BindDeviceOptions}, }; use crate::{ @@ -45,7 +45,7 @@ use super::{ClusterOps, ClusterType}; #[derive(Debug, Clone)] pub struct DynamicClusterBuilder { pub name: &'static str, - pub bind_device: Option, + pub bind_device_options: BindDeviceOptions, pub transport_socket: UpstreamTransportSocketConfigurator, pub health_check: Option, pub load_balancing_policy: LbPolicy, @@ -54,7 +54,7 @@ pub struct DynamicClusterBuilder { impl DynamicClusterBuilder { pub fn build(self) -> ClusterType { - let DynamicClusterBuilder { name, transport_socket, health_check, load_balancing_policy, bind_device, config } = + let DynamicClusterBuilder { name, transport_socket, health_check, load_balancing_policy, bind_device_options, config } = self; 
ClusterType::Dynamic(DynamicCluster { name, @@ -62,7 +62,7 @@ impl DynamicClusterBuilder { transport_socket, health_check, load_balancing_policy, - bind_device, + bind_device_options, config, }) } @@ -71,7 +71,7 @@ impl DynamicClusterBuilder { #[derive(Debug, Clone)] pub struct DynamicCluster { pub name: &'static str, - pub bind_device: Option, + pub bind_device_options: BindDeviceOptions, pub(super) load_assignment: Option, pub transport_socket: UpstreamTransportSocketConfigurator, pub health_check: Option, @@ -183,6 +183,6 @@ impl TryFrom<&DynamicCluster> for ClusterLoadAssignmentConfig { Ok(LocalityLbEndpointsConfig { priority: lep.priority, lb_endpoints }) }) .collect::>>()?; - Ok(ClusterLoadAssignmentConfig { endpoints }) + Ok(ClusterLoadAssignmentConfig { cluster_name: cluster.name.to_owned(), endpoints }) } } diff --git a/orion-lib/src/clusters/cluster/original_dst.rs b/orion-lib/src/clusters/cluster/original_dst.rs index a05276e1..ed4bc35f 100644 --- a/orion-lib/src/clusters/cluster/original_dst.rs +++ b/orion-lib/src/clusters/cluster/original_dst.rs @@ -18,14 +18,19 @@ // // -use std::time::{Duration, Instant}; - -use rustls::ClientConfig; +use std::{ + collections::BTreeSet, + net::{IpAddr, SocketAddr}, + str::FromStr, + time::{Duration, Instant}, +}; +use if_addrs; use orion_configuration::config::{ cluster::{ClusterDiscoveryType, HealthCheck, OriginalDstRoutingMethod}, - transport::BindDevice, + transport::BindDeviceOptions, }; +use rustls::ClientConfig; use tracing::{debug, warn}; use webpki::types::ServerName; @@ -58,7 +63,7 @@ const DEFAULT_CLEANUP_INTERVAL: Duration = Duration::from_secs(5); #[derive(Debug, Clone)] pub struct OriginalDstClusterBuilder { pub name: &'static str, - pub bind_device: Option, + pub bind_device_options: BindDeviceOptions, pub transport_socket: UpstreamTransportSocketConfigurator, pub connect_timeout: Option, pub server_name: Option>, @@ -67,8 +72,14 @@ pub struct OriginalDstClusterBuilder { impl OriginalDstClusterBuilder { 
pub fn build(self) -> ClusterType { - let OriginalDstClusterBuilder { name, bind_device, transport_socket, connect_timeout, server_name, config } = - self; + let OriginalDstClusterBuilder { + name, + bind_device_options, + transport_socket, + connect_timeout, + server_name, + config, + } = self; let (routing_requirements, upstream_port_override) = if let ClusterDiscoveryType::OriginalDst(ref original_dst_config) = config.discovery_settings { let routing_req = match &original_dst_config.routing_method { @@ -98,7 +109,7 @@ impl OriginalDstClusterBuilder { name, http_config, transport_socket, - bind_device, + bind_device_options, cleanup_interval: config.cleanup_interval.unwrap_or(DEFAULT_CLEANUP_INTERVAL), endpoints: lrumap::LruMap::new(), routing_requirements, @@ -121,8 +132,8 @@ struct HttpChannelConfig { pub struct OriginalDstCluster { pub name: &'static str, http_config: HttpChannelConfig, - transport_socket: UpstreamTransportSocketConfigurator, - bind_device: Option, + pub transport_socket: UpstreamTransportSocketConfigurator, + bind_device_options: BindDeviceOptions, cleanup_interval: Duration, endpoints: lrumap::LruMap, routing_requirements: RoutingRequirement, @@ -187,23 +198,31 @@ impl ClusterOps for OriginalDstCluster { } fn get_http_connection(&mut self, context: RoutingContext) -> Result { + warn!("OriginalDstCluster get HTTP connection for {:?}", context); match context { - RoutingContext::Authority(authority) => self.get_http_connection_by_authority(authority), + RoutingContext::Authority(authority, original_dst_address) => { + self.get_http_connection_by_authority(authority, Some(original_dst_address)) + }, RoutingContext::Header(header_value) => self.get_http_connection_by_header(header_value), _ => Err(format!("ORIGINAL_DST cluster {} requires authority or header routing context", self.name).into()), } } fn get_tcp_connection(&mut self, context: RoutingContext) -> Result { + warn!("OriginalDstCluster get TCP connection for {:?}", context); match 
context { - RoutingContext::Authority(authority) => self.get_tcp_connection_by_authority(authority), + RoutingContext::Authority(authority, original_dst_address) => { + self.get_tcp_connection_by_authority(authority, Some(original_dst_address)) + }, _ => Err(format!("ORIGINAL_DST cluster {} requires authority routing context", self.name).into()), } } fn get_grpc_connection(&mut self, context: RoutingContext) -> Result { match context { - RoutingContext::Authority(authority) => self.get_grpc_connection_by_authority(authority), + RoutingContext::Authority(authority, original_dst_address) => { + self.get_grpc_connection_by_authority(authority) + }, _ => Err(format!("ORIGINAL_DST cluster {} requires authority routing context", self.name).into()), } } @@ -236,15 +255,31 @@ impl OriginalDstCluster { self.cleanup_if_needed(); - let endpoint = - Endpoint::try_new(&authority, &self.http_config, self.bind_device.clone(), self.transport_socket.clone())?; + let endpoint = Endpoint::try_new( + &authority, + &self.http_config, + self.bind_device_options.clone(), + self.transport_socket.clone(), + )?; let grpc_service = endpoint.grpc_service()?; self.endpoints.insert(&endpoint_addr, endpoint); Ok(grpc_service) } - pub fn get_tcp_connection_by_authority(&mut self, authority: Authority) -> Result { - let authority = self.apply_port_override(authority)?; + pub fn get_tcp_connection_by_authority( + &mut self, + authority: Authority, + original_dst_address: Option, + ) -> Result { + debug!( + "Original Dst Cluster TCP original authority {authority} original dst address {original_dst_address:?} " + ); + + let authority = if let Some(dst_address) = original_dst_address { + Authority::from_str(&dst_address.to_string())? 
+ } else { + authority + }; let endpoint_addr = EndpointAddress(authority.clone()); if let Some(endpoint) = self.endpoints.touch(&endpoint_addr) { @@ -253,8 +288,15 @@ impl OriginalDstCluster { self.cleanup_if_needed(); - let endpoint = - Endpoint::try_new(&authority, &self.http_config, self.bind_device.clone(), self.transport_socket.clone())?; + warn!("Original Dst Cluster using authority {authority}"); + + let endpoint = Endpoint::try_new( + &authority, + &self.http_config, + self.bind_device_options.clone(), + self.transport_socket.clone(), + )?; + let tcp_connector = endpoint.tcp_channel.clone(); self.endpoints.insert(&endpoint_addr, endpoint); Ok(tcp_connector) @@ -263,10 +305,24 @@ impl OriginalDstCluster { pub fn get_http_connection_by_header(&mut self, header_value: &HeaderValue) -> Result { let authority = Authority::try_from(header_value.as_bytes()) .map_err(|_| format!("Invalid authority in header for ORIGINAL_DST cluster {}", self.name))?; - self.get_http_connection_by_authority(authority) + self.get_http_connection_by_authority(authority, None) } - pub fn get_http_connection_by_authority(&mut self, authority: Authority) -> Result { + pub fn get_http_connection_by_authority( + &mut self, + authority: Authority, + original_dst_address: Option, + ) -> Result { + debug!( + "Original Dst Cluster HTTP original authority {authority} original dst address {original_dst_address:?} " + ); + + let authority = if let Some(dst_address) = original_dst_address { + Authority::from_str(&dst_address.to_string())? 
+ } else { + authority + }; + let authority = self.apply_port_override(authority)?; let endpoint_addr = EndpointAddress(authority.clone()); @@ -276,8 +332,13 @@ impl OriginalDstCluster { self.cleanup_if_needed(); - let endpoint = - Endpoint::try_new(&authority, &self.http_config, self.bind_device.clone(), self.transport_socket.clone())?; + warn!("Original Dst Cluster using authority {authority}"); + let endpoint = Endpoint::try_new( + &authority, + &self.http_config, + self.bind_device_options.clone(), + self.transport_socket.clone(), + )?; let http_channel = endpoint.http_channel.clone(); self.endpoints.insert(&endpoint_addr, endpoint); Ok(http_channel) @@ -344,10 +405,10 @@ impl Endpoint { fn try_new( authority: &Authority, http_config: &HttpChannelConfig, - bind_device: Option, + bind_device_options: BindDeviceOptions, transport_socket: UpstreamTransportSocketConfigurator, ) -> Result { - let builder = HttpChannelBuilder::new(bind_device.clone()) + let builder = HttpChannelBuilder::new(bind_device_options.clone()) .with_authority(authority.clone()) .with_timeout(http_config.connect_timeout); let builder = if let Some(tls_conf) = &http_config.tls_configurator { @@ -359,11 +420,12 @@ impl Endpoint { } else { builder }; - let http_channel = builder.with_http_protocol_options(http_config.http_protocol_options.clone()).build()?; + let http_channel = + builder.with_http_protocol_options(http_config.http_protocol_options.clone()).build_with_no_address()?; let tcp_channel = TcpChannelConnector::new( authority, "original_dst_cluster", - bind_device, + bind_device_options, http_config.connect_timeout, transport_socket, ); @@ -694,7 +756,7 @@ mod tests { }), cleanup_interval, transport_socket: None, - bind_device: None, + bind_device_options: BindDeviceOptions::default(), load_balancing_policy: LbPolicy::ClusterProvided, http_protocol_options: HttpProtocolOptions::default(), health_check: None, @@ -717,8 +779,18 @@ mod tests { let mut cluster = 
build_original_dst_cluster(config); let authority = Authority::from_str("localhost:52000").unwrap(); - let channel1 = cluster.get_http_connection(RoutingContext::Authority(authority.clone())).unwrap(); - let channel2 = cluster.get_http_connection(RoutingContext::Authority(authority)).unwrap(); + let channel1 = cluster + .get_http_connection(RoutingContext::Authority( + authority.clone(), + "127.0.0.1:9000".parse().expect("Do expect this to work"), + )) + .unwrap(); + let channel2 = cluster + .get_http_connection(RoutingContext::Authority( + authority, + "127.0.0.1:9000".parse().expect("Do expect this to work"), + )) + .unwrap(); assert_eq!(channel1.upstream_authority, channel2.upstream_authority); assert_eq!(cluster.endpoints.len(), 1); @@ -748,7 +820,12 @@ mod tests { let mut cluster = build_original_dst_cluster(config); let authority = Authority::from_str("localhost:52000").unwrap(); - let _tcp_future = cluster.get_tcp_connection(RoutingContext::Authority(authority)).unwrap(); + let _tcp_future = cluster + .get_tcp_connection(RoutingContext::Authority( + authority, + "127.0.0.1:9000".parse().expect("Do expect this to work"), + )) + .unwrap(); let endpoints = cluster.all_tcp_channels(); assert_eq!(endpoints.len(), 1); @@ -765,9 +842,9 @@ mod tests { let mut cluster = build_original_dst_cluster(config); let auth1 = Authority::from_str("localhost:5100").unwrap(); - let auth1_context = RoutingContext::Authority(auth1); + let auth1_context = RoutingContext::Authority(auth1, "127.0.0.1:9000".parse().expect("Do expect this to work")); let auth2 = Authority::from_str("localhost:5101").unwrap(); - let auth2_context = RoutingContext::Authority(auth2); + let auth2_context = RoutingContext::Authority(auth2, "127.0.0.1:9000".parse().expect("Do expect this to work")); let _grpc1 = cluster.get_grpc_connection(auth1_context).unwrap(); let _grpc2 = cluster.get_grpc_connection(auth2_context).unwrap(); diff --git a/orion-lib/src/clusters/cluster/static.rs 
b/orion-lib/src/clusters/cluster/static.rs index 56f28b9e..8479407a 100644 --- a/orion-lib/src/clusters/cluster/static.rs +++ b/orion-lib/src/clusters/cluster/static.rs @@ -54,11 +54,17 @@ impl StaticClusterBuilder { pub struct StaticCluster { pub name: &'static str, pub load_assignment: ClusterLoadAssignment, - pub(super) transport_socket: UpstreamTransportSocketConfigurator, + pub transport_socket: UpstreamTransportSocketConfigurator, pub health_check: Option, pub config: orion_configuration::config::cluster::Cluster, } +impl StaticCluster{ + pub fn change_load_assignment(&mut self, cluster_load_assignment: ClusterLoadAssignment) { + self.load_assignment = cluster_load_assignment; + } +} + impl ClusterOps for StaticCluster { fn get_name(&self) -> &'static str { self.name @@ -115,4 +121,5 @@ impl ClusterOps for StaticCluster { _ => RoutingRequirement::None, } } + } diff --git a/orion-lib/src/clusters/clusters_manager.rs b/orion-lib/src/clusters/clusters_manager.rs index b042306d..2ee2698c 100644 --- a/orion-lib/src/clusters/clusters_manager.rs +++ b/orion-lib/src/clusters/clusters_manager.rs @@ -34,14 +34,14 @@ use crate::{ }; use http::{uri::Authority, HeaderName, HeaderValue, Request}; use hyper::body::Incoming; -use orion_configuration::config::cluster::{Cluster as ClusterConfig, ClusterSpecifier as ClusterSpecifierConfig}; +use orion_configuration::config::{cluster::{Cluster as ClusterConfig, ClusterSpecifier as ClusterSpecifierConfig}, transport::BindDeviceOptions}; use orion_interner::StringInterner; use rand::{prelude::SliceRandom, thread_rng}; use std::{ cell::RefCell, - collections::{btree_map::Entry as BTreeEntry, BTreeMap}, + collections::{btree_map::Entry as BTreeEntry, BTreeMap}, net::SocketAddr, }; -use tracing::warn; +use tracing::{info, warn}; type ClusterID = &'static str; type ClustersMap = BTreeMap; @@ -54,22 +54,46 @@ pub enum RoutingRequirement { Hash, } + pub enum RoutingContext<'a> { None, Header(&'a HeaderValue), - Authority(Authority), + 
Authority(Authority, SocketAddr), Hash(HashState<'a>), } -impl<'a> TryFrom<(&'a RoutingRequirement, &'a Request>>, HashState<'a>)> +impl std::fmt::Debug for RoutingContext<'_>{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::None => write!(f, "None"), + Self::Header(arg0) => f.debug_tuple("Header").field(arg0).finish(), + Self::Authority(arg0,_) => f.debug_tuple("Authority").field(arg0).finish(), + Self::Hash(_) => f.debug_tuple("Hash").finish(), + } + } +} + +impl std::fmt::Display for RoutingContext<'_>{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::None => Ok(f.write_str("RoutingContext None")?), + Self::Header(arg0) => Ok(f.write_str(&format!("RoutingContext header: {arg0:?}"))?), + Self::Authority(arg0, _) => Ok(f.write_str(&format!("RoutingContext authority: {arg0:?}"))?), + Self::Hash(_) => Ok(f.write_str("RoutingContext Hash")?), + } + + } +} + +impl<'a> TryFrom<(&'a RoutingRequirement, &'a Request>>, HashState<'a>, SocketAddr)> for RoutingContext<'a> { type Error = String; fn try_from( - value: (&'a RoutingRequirement, &'a Request>>, HashState<'a>), + value: (&'a RoutingRequirement, &'a Request>>, HashState<'a>, SocketAddr), ) -> std::result::Result { - let (routing_requirement, request, hash_state) = value; + let (routing_requirement, request, hash_state, original_destination_address) = value; match routing_requirement { RoutingRequirement::Header(header_name) => { let header_value = request @@ -78,10 +102,17 @@ impl<'a> TryFrom<(&'a RoutingRequirement, &'a Request { - let msg = "Routing by Authority is not currently supported, coming soon".to_owned(); - warn!(msg); - Err(msg) + RoutingRequirement::Authority => { + warn!("Routing by Authority {:?} {:?}",request.uri().authority(), request.headers().get(http::header::HOST)); + if request.uri().authority().is_none(){ + if let Some(host) = request.headers().get(http::header::HOST){ + 
Ok(RoutingContext::Authority(Authority::try_from(host.as_bytes()).map_err(|_op| "Routing by Authority.. can't convert host to authority".to_owned())?, original_destination_address)) + }else{ + Err("Routing by Authority.. No host header".to_owned()) + } + }else{ + Ok(RoutingContext::Authority(request.uri().authority().cloned().ok_or("Routing by Authority but not authority".to_owned())?,original_destination_address)) + } }, RoutingRequirement::Hash => Ok(RoutingContext::Hash(hash_state)), RoutingRequirement::None => Ok(RoutingContext::None), @@ -89,6 +120,8 @@ impl<'a> TryFrom<(&'a RoutingRequirement, &'a Request = CachedWatch::new(ClustersMap::new()); thread_local! { @@ -118,22 +151,37 @@ pub fn change_cluster_load_assignment(name: &str, cla: &PartialClusterLoadAssign .with_cla(cla.clone()) .with_transport_socket(dynamic_cluster.transport_socket.clone()) .with_cluster_name(dynamic_cluster.name) - .with_bind_device(dynamic_cluster.bind_device.clone()) + .with_bind_device_options(dynamic_cluster.bind_device_options.clone()) .with_lb_policy(dynamic_cluster.load_balancing_policy) .prepare(); cla.build().map(|cla| dynamic_cluster.change_load_assignment(Some(cla)))?; Ok(cluster.clone()) }, - ClusterType::Static(_) => { - let msg = format!("{name} Attempt to change CLA for static cluster"); - warn!(msg); - Err(msg.into()) - }, - ClusterType::OnDemand(_) => { - let msg = format!("{name} Attempt to change CLA for ORIGINAL_DST cluster"); + ClusterType::Static(static_cluster) => { + let msg = format!("{name} Attempt to change CLA for Static cluster "); warn!(msg); - Err(msg.into()) + let cla = ClusterLoadAssignmentBuilder::builder() + .with_cla(cla.clone()) + .with_transport_socket(static_cluster.transport_socket.clone()) + .with_cluster_name(static_cluster.name) + .with_bind_device_options(BindDeviceOptions::default()) + .with_lb_policy(orion_configuration::config::cluster::LbPolicy::RoundRobin) + .prepare(); + 
cla.build().map(|cla|static_cluster.change_load_assignment(cla))?; + Ok(cluster.clone()) }, + ClusterType::OnDemand(original_dst_cluster) => { + + if cla.is_empty(){ + let msg = format!("{name} Attempt to change CLA for ORIGINAL_DST cluster {cla:?}"); + info!(msg); + Ok(ClusterType::OnDemand(original_dst_cluster.clone())) + }else{ + let msg = format!("{name} Attempt to change CLA for ORIGINAL_DST cluster ...but endpoints are not empty {cla:?}"); + warn!(msg); + Err(msg.into()) + } + } } } else { let msg = format!("{name} No cluster found"); @@ -191,7 +239,9 @@ pub fn update_tls_context(secret_id: &str, secret: &TransportSecret) -> Result Result { - let cluster = partial_cluster.build()?; + let cluster = partial_cluster.build(); + let cluster = cluster?; + let cluster_name = cluster.get_name(); CLUSTERS_MAP.update(|current| match current.entry(cluster_name) { diff --git a/orion-lib/src/clusters/load_assignment.rs b/orion-lib/src/clusters/load_assignment.rs index 15bb07b8..e5beef70 100644 --- a/orion-lib/src/clusters/load_assignment.rs +++ b/orion-lib/src/clusters/load_assignment.rs @@ -21,9 +21,12 @@ use std::{sync::Arc, time::Duration}; use http::uri::Authority; -use orion_configuration::config::cluster::{ - ClusterLoadAssignment as ClusterLoadAssignmentConfig, HealthStatus, HttpProtocolOptions, - LbEndpoint as LbEndpointConfig, LbPolicy, LocalityLbEndpoints as LocalityLbEndpointsConfig, +use orion_configuration::config::{ + cluster::{ + ClusterLoadAssignment as ClusterLoadAssignmentConfig, HealthStatus, HttpProtocolOptions, + LbEndpoint as LbEndpointConfig, LbPolicy, LocalityLbEndpoints as LocalityLbEndpointsConfig, + }, + core::envoy_conversions::Address, transport::BindDeviceOptions, }; use tracing::debug; use typed_builder::TypedBuilder; @@ -40,7 +43,7 @@ use super::{ }; use crate::{ transport::{ - bind_device::BindDevice, GrpcService, HttpChannel, HttpChannelBuilder, TcpChannelConnector, + GrpcService, HttpChannel, HttpChannelBuilder, TcpChannelConnector, 
UpstreamTransportSocketConfigurator, }, Result, @@ -49,8 +52,9 @@ use crate::{ #[derive(Debug, Clone)] pub struct LbEndpoint { pub name: &'static str, + pub address: Address, pub authority: http::uri::Authority, - pub bind_device: Option, + pub bind_device_options: BindDeviceOptions, pub weight: u32, pub health_status: HealthStatus, http_channel: HttpChannel, @@ -106,8 +110,9 @@ impl LbEndpoint { #[derive(Debug, Clone)] pub struct PartialLbEndpoint { + pub address: Address, pub authority: http::uri::Authority, - pub bind_device: Option, + pub bind_device_options: BindDeviceOptions, pub weight: u32, pub health_status: HealthStatus, } @@ -115,8 +120,9 @@ pub struct PartialLbEndpoint { impl PartialLbEndpoint { fn new(value: &LbEndpoint) -> Self { PartialLbEndpoint { + address: value.address.clone(), authority: value.authority.clone(), - bind_device: value.bind_device.clone(), + bind_device_options: value.bind_device_options.clone(), weight: value.weight, health_status: value.health_status, } @@ -143,16 +149,17 @@ struct LbEndpointBuilder { impl LbEndpointBuilder { #[must_use] - fn replace_bind_device(mut self, bind_device: Option) -> Self { - self.endpoint.bind_device = bind_device; + fn replace_bind_device_options(mut self, bind_device_options: BindDeviceOptions) -> Self { + self.endpoint.bind_device_options = bind_device_options; self } pub fn build(self) -> Result> { let cluster_name = self.cluster_name; - let PartialLbEndpoint { authority, bind_device, weight, health_status } = self.endpoint; + let PartialLbEndpoint { address, authority, bind_device_options, weight, health_status } = self.endpoint; - let builder = HttpChannelBuilder::new(bind_device.clone()) + let builder = HttpChannelBuilder::new(bind_device_options.clone()) + .with_address(address.clone()) .with_authority(authority.clone()) .with_timeout(self.connect_timeout) .with_cluster_name(cluster_name); @@ -164,10 +171,11 @@ impl LbEndpointBuilder { builder.with_tls(maybe_tls_conf.cloned()) }; let 
http_channel = builder.with_http_protocol_options(self.http_protocol_options).build()?; + let tcp_channel = TcpChannelConnector::new( &authority, cluster_name, - bind_device.clone(), + bind_device_options.clone(), self.connect_timeout, self.transport_socket.clone(), ); @@ -175,7 +183,8 @@ impl LbEndpointBuilder { Ok(Arc::new(LbEndpoint { name: cluster_name, authority, - bind_device, + address, + bind_device_options, weight, health_status, http_channel, @@ -190,9 +199,12 @@ impl TryFrom for PartialLbEndpoint { fn try_from(lb_endpoint: LbEndpointConfig) -> Result { let health_status = lb_endpoint.health_status; let address = lb_endpoint.address; - let authority = http::uri::Authority::try_from(format!("{address}"))?; + let authority = match &address { + Address::Socket(_, _) => http::uri::Authority::try_from(format!("{address}"))?, + Address::Pipe(_, _) => http::uri::Authority::from_static("pipe_dream"), + }; let weight = lb_endpoint.load_balancing_weight.into(); - Ok(PartialLbEndpoint { authority, bind_device: None, weight, health_status }) + Ok(PartialLbEndpoint { address, authority, bind_device_options: BindDeviceOptions::default(), weight, health_status }) } } @@ -237,7 +249,7 @@ pub struct PartialLocalityLbEndpoints { #[builder(build_method(vis="", name=prepare), field_defaults(setter(prefix = "with_")))] pub struct LocalityLbEndpointsBuilder { cluster_name: &'static str, - bind_device: Option, + bind_device_options: BindDeviceOptions, endpoints: PartialLocalityLbEndpoints, http_protocol_options: HttpProtocolOptions, transport_socket: UpstreamTransportSocketConfigurator, @@ -263,7 +275,7 @@ impl LocalityLbEndpointsBuilder { .with_server_name(server_name) .with_http_protocol_options(self.http_protocol_options.clone()) .prepare() - .replace_bind_device(self.bind_device.clone()) + .replace_bind_device_options(self.bind_device_options.clone()) .build() }) .collect::>()?; @@ -349,6 +361,11 @@ pub struct ClusterLoadAssignment { pub struct PartialClusterLoadAssignment 
{ endpoints: Vec, } +impl PartialClusterLoadAssignment{ + pub fn is_empty(&self)->bool{ + self.endpoints.is_empty() + } +} impl ClusterLoadAssignment { pub fn get_http_channel(&mut self, hash: Option) -> Result { @@ -413,7 +430,7 @@ impl ClusterLoadAssignment { pub struct ClusterLoadAssignmentBuilder { cluster_name: &'static str, cla: PartialClusterLoadAssignment, - bind_device: Option, + bind_device_options: BindDeviceOptions, #[builder(default)] protocol_options: Option, lb_policy: LbPolicy, @@ -430,7 +447,6 @@ impl ClusterLoadAssignmentBuilder { let protocol_options = self.protocol_options.unwrap_or_default(); let PartialClusterLoadAssignment { endpoints } = self.cla; - let endpoints = endpoints .into_iter() .map(|e| { @@ -439,7 +455,7 @@ impl ClusterLoadAssignmentBuilder { LocalityLbEndpointsBuilder::builder() .with_cluster_name(cluster_name) .with_endpoints(e) - .with_bind_device(self.bind_device.clone()) + .with_bind_device_options(self.bind_device_options.clone()) .with_connection_timeout(self.connection_timeout) .with_transport_socket(self.transport_socket.clone()) .with_server_name(server_name) @@ -475,9 +491,9 @@ impl TryFrom for PartialClusterLoadAssignment { let endpoints: Vec<_> = cla.endpoints.into_iter().map(PartialLocalityLbEndpoints::try_from).collect::>()?; - if endpoints.is_empty() { - return Err("At least one locality must be specified".into()); - } + // if endpoints.is_empty() { + // return Err("At least one locality must be specified".into()); + // } Ok(Self { endpoints }) } @@ -486,12 +502,13 @@ impl TryFrom for PartialClusterLoadAssignment { #[cfg(test)] mod test { use http::uri::Authority; + use orion_configuration::config::{core::envoy_conversions::Address, transport::BindDeviceOptions}; use super::LbEndpoint; use crate::{ clusters::health::HealthStatus, transport::{ - bind_device::BindDevice, HttpChannelBuilder, TcpChannelConnector, UpstreamTransportSocketConfigurator, + HttpChannelBuilder, TcpChannelConnector, 
UpstreamTransportSocketConfigurator, }, }; @@ -499,12 +516,13 @@ mod test { /// This function is used by unit tests in other modules pub fn new( authority: Authority, + address: Address, cluster_name: &'static str, - bind_device: Option, + bind_device_options: BindDeviceOptions, weight: u32, health_status: HealthStatus, ) -> Self { - let http_channel = HttpChannelBuilder::new(bind_device.clone()) + let http_channel = HttpChannelBuilder::new(bind_device_options.clone()) .with_authority(authority.clone()) .with_cluster_name(cluster_name) .build() @@ -512,12 +530,12 @@ mod test { let tcp_channel = TcpChannelConnector::new( &authority, "test_cluster", - bind_device.clone(), + bind_device_options.clone(), None, UpstreamTransportSocketConfigurator::default(), ); - Self { name: "Cluster", authority, bind_device, weight, health_status, http_channel, tcp_channel } + Self { name: "Cluster", authority, address, bind_device_options, weight, health_status, http_channel, tcp_channel } } } } diff --git a/orion-lib/src/clusters/retry_policy.rs b/orion-lib/src/clusters/retry_policy.rs index 092d43c8..64deb0a7 100644 --- a/orion-lib/src/clusters/retry_policy.rs +++ b/orion-lib/src/clusters/retry_policy.rs @@ -25,6 +25,7 @@ use orion_configuration::config::network_filters::http_connection_manager::{Retr use orion_format::types::ResponseFlags as FmtResponseFlags; use tokio::time::error::Elapsed; +use tracing::warn; use crate::{body::response_flags::ResponseFlags, Error as BoxError}; use std::{error::Error, io}; @@ -277,6 +278,9 @@ impl RetryCondition<'_, B> { return true; } }, + _ => { + warn!("Unsupported retry policy {policy:?}"); + } } } false diff --git a/orion-lib/src/configuration/mod.rs b/orion-lib/src/configuration/mod.rs index 05a5d6a0..ebaea269 100644 --- a/orion-lib/src/configuration/mod.rs +++ b/orion-lib/src/configuration/mod.rs @@ -45,7 +45,7 @@ pub fn get_listeners_and_clusters( .collect::>>()?; if clusters.is_empty() { //shouldn't happen with new config - return 
Err::<(SecretManager, Vec<_>, Vec<_>), Error>("No clusters configured".into()); + return Err::<(SecretManager, Vec<_>, Vec<_>), Error>("No valid clusters configured".into()); } Ok((secret_manager, listeners, clusters)) } diff --git a/orion-lib/src/listeners/filter_state.rs b/orion-lib/src/listeners/filter_state.rs index 9c66ea6d..4bc3ca9b 100644 --- a/orion-lib/src/listeners/filter_state.rs +++ b/orion-lib/src/listeners/filter_state.rs @@ -26,6 +26,7 @@ pub enum DownstreamConnectionMetadata { FromSocket { peer_address: SocketAddr, local_address: SocketAddr, + original_destination_address: Option, }, FromProxyProtocol { original_peer_address: SocketAddr, @@ -50,4 +51,11 @@ impl DownstreamConnectionMetadata { Self::FromProxyProtocol { original_destination_address, .. } => *original_destination_address, } } + pub fn original_destination_address(&self) -> SocketAddr { + match self { + Self::FromSocket { original_destination_address: Some(dst_address), .. } => *dst_address, + Self::FromSocket { local_address, original_destination_address: None, .. } => *local_address, + Self::FromProxyProtocol { original_destination_address, .. 
} => *original_destination_address, + } + } } diff --git a/orion-lib/src/listeners/http_connection_manager.rs b/orion-lib/src/listeners/http_connection_manager.rs index c20195c8..fb3f9237 100644 --- a/orion-lib/src/listeners/http_connection_manager.rs +++ b/orion-lib/src/listeners/http_connection_manager.rs @@ -41,7 +41,7 @@ use hyper::{body::Incoming, service::Service, Request, Response}; use opentelemetry::global::BoxedSpan; use opentelemetry::trace::{Span, Status}; use opentelemetry::KeyValue; -use orion_configuration::config::GenericError; +use orion_configuration::config::{ConfigSource, ConfigSourceSpecifier, GenericError}; use orion_tracing::span_state::SpanState; use orion_tracing::{attributes::HTTP_RESPONSE_STATUS_CODE, with_client_span, with_server_span}; @@ -56,8 +56,7 @@ use orion_configuration::config::network_filters::{ http_connection_manager::{ http_filters::{http_rbac::HttpRbac, HttpFilter as HttpFilterConfig, HttpFilterType}, route::{Action, RouteMatchResult}, - CodecType, ConfigSource, ConfigSourceSpecifier, HttpConnectionManager as HttpConnectionManagerConfig, - RdsSpecifier, RouteSpecifier, UpgradeType, + CodecType, HttpConnectionManager as HttpConnectionManagerConfig, RdsSpecifier, RouteSpecifier, UpgradeType, }, }; use orion_format::context::{ @@ -185,6 +184,7 @@ pub enum HttpFilterValue { // while Rbac uses a configuration type - we might want to revisit this RateLimit(LocalRateLimit), Rbac(HttpRbac), + Ignored, } impl From for HttpFilter { @@ -193,6 +193,7 @@ impl From for HttpFilter { let filter = match filter { HttpFilterType::RateLimit(r) => HttpFilterValue::RateLimit(r.into()), HttpFilterType::Rbac(rbac) => HttpFilterValue::Rbac(rbac), + HttpFilterType::Ingored => HttpFilterValue::Ignored, }; Self { name, disabled, filter: Some(filter) } } @@ -203,12 +204,15 @@ impl HttpFilterValue { match self { HttpFilterValue::Rbac(rbac) => apply_authorization_rules(rbac, request), HttpFilterValue::RateLimit(rl) => rl.run(request), + 
HttpFilterValue::Ignored => FilterDecision::Continue, } } pub fn apply_response(&self, _response: &mut Response) -> FilterDecision { match self { // RBAC and RateLimit do not apply on the response path - HttpFilterValue::Rbac(_) | HttpFilterValue::RateLimit(_) => FilterDecision::Continue, + HttpFilterValue::Rbac(_) | HttpFilterValue::RateLimit(_) | HttpFilterValue::Ignored => { + FilterDecision::Continue + }, } } fn from_filter_override(value: &FilterOverride) -> Option { @@ -755,6 +759,7 @@ impl retry_policy: chosen_route.vh.retry_policy.as_ref(), route_match: chosen_route.route_match, remote_address: downstream_metadata.peer_address(), + original_destination_address: downstream_metadata.original_destination_address(), websocket_enabled_by_default, }, &connection_manager, diff --git a/orion-lib/src/listeners/http_connection_manager/route.rs b/orion-lib/src/listeners/http_connection_manager/route.rs index cad19c73..89b198ef 100644 --- a/orion-lib/src/listeners/http_connection_manager/route.rs +++ b/orion-lib/src/listeners/http_connection_manager/route.rs @@ -49,7 +49,7 @@ use orion_tracing::attributes::{UPSTREAM_ADDRESS, UPSTREAM_CLUSTER_NAME}; use orion_tracing::http_tracer::{SpanKind, SpanName}; use smol_str::ToSmolStr; use std::net::SocketAddr; -use tracing::debug; +use tracing::{debug, info}; pub struct MatchedRequest<'a> { pub request: Request>>, @@ -57,6 +57,7 @@ pub struct MatchedRequest<'a> { pub remote_address: SocketAddr, pub route_match: RouteMatchResult, pub websocket_enabled_by_default: bool, + pub original_destination_address: SocketAddr } impl<'a> RequestHandler<(MatchedRequest<'a>, &HttpConnectionManager)> for &RouteAction { @@ -72,12 +73,20 @@ impl<'a> RequestHandler<(MatchedRequest<'a>, &HttpConnectionManager)> for &Route remote_address, route_match, websocket_enabled_by_default, + original_destination_address } = request; + let uri = downstream_request.uri().clone(); + + info!("Handling request for {} {:?}", uri, &self.cluster_specifier); let 
cluster_id = clusters_manager::resolve_cluster(&self.cluster_specifier) .ok_or_else(|| "Failed to resolve cluster from specifier".to_owned())?; let routing_requirement = clusters_manager::get_cluster_routing_requirements(cluster_id); let hash_state = HashState::new(self.hash_policy.as_slice(), &downstream_request, remote_address); - let routing_context = RoutingContext::try_from((&routing_requirement, &downstream_request, hash_state))?; + let routing_context = RoutingContext::try_from((&routing_requirement, &downstream_request, hash_state, original_destination_address))?; + + + info!("Handling request for {} {} {} routing req = {:?}", uri, cluster_id, remote_address, routing_requirement); + let maybe_channel = clusters_manager::get_http_connection(cluster_id, routing_context); match maybe_channel { diff --git a/orion-lib/src/listeners/listener.rs b/orion-lib/src/listeners/listener.rs index 12eda9c3..3395960f 100644 --- a/orion-lib/src/listeners/listener.rs +++ b/orion-lib/src/listeners/listener.rs @@ -30,8 +30,9 @@ use crate::{ }; use opentelemetry::KeyValue; use orion_configuration::config::{ - listener::{FilterChainMatch, Listener as ListenerConfig, MatchResult}, + listener::{DetectedTransportProtocol, FilterChainMatch, Listener as ListenerConfig, MatchResult}, listener_filters::DownstreamProxyProtocolConfig, + transport::BindDeviceOptions, }; use orion_interner::StringInterner; use orion_metrics::{ @@ -40,6 +41,7 @@ use orion_metrics::{ }; use rustls::ServerConfig; use scopeguard::defer; +use std::os::fd::AsFd; use std::{ collections::HashMap, fmt::Debug, @@ -59,7 +61,7 @@ use tracing::{debug, info, warn}; struct PartialListener { name: &'static str, socket_address: std::net::SocketAddr, - bind_device: Option, + bind_device_options: BindDeviceOptions, filter_chains: HashMap, with_tls_inspector: bool, proxy_protocol_config: Option, @@ -84,7 +86,7 @@ impl TryFrom> for PartialListener { .into_iter() .map(|f| FilterchainBuilder::try_from(ConversionContext::new((f.1, 
secret_manager))).map(|x| (f.0, x))) .collect::>()?; - let bind_device = listener.bind_device; + let bind_device_options = listener.bind_device_options; if !with_tls_inspector { let has_server_names = filter_chains.keys().any(|m| !m.server_names.is_empty()); @@ -98,7 +100,7 @@ impl TryFrom> for PartialListener { Ok(PartialListener { name, socket_address: addr, - bind_device, + bind_device_options, filter_chains, with_tls_inspector, proxy_protocol_config, @@ -115,7 +117,7 @@ impl ListenerFactory { let PartialListener { name, socket_address, - bind_device, + bind_device_options, filter_chains, with_tls_inspector, proxy_protocol_config, @@ -129,7 +131,7 @@ impl ListenerFactory { Ok(Listener { name, socket_address, - bind_device, + bind_device_options, filter_chains, with_tls_inspector, proxy_protocol_config, @@ -151,7 +153,7 @@ impl TryFrom> for ListenerFactory { pub struct Listener { name: &'static str, socket_address: std::net::SocketAddr, - bind_device: Option, + bind_device_options: BindDeviceOptions, pub filter_chains: HashMap, with_tls_inspector: bool, proxy_protocol_config: Option, @@ -170,7 +172,7 @@ impl Listener { Listener { name, socket_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), - bind_device: None, + bind_device_options: BindDeviceOptions::default(), filter_chains: HashMap::new(), with_tls_inspector: false, proxy_protocol_config: None, @@ -183,21 +185,21 @@ impl Listener { self.name } pub fn get_socket(&self) -> (&std::net::SocketAddr, Option<&BindDevice>) { - (&self.socket_address, self.bind_device.as_ref()) + (&self.socket_address, self.bind_device_options.bind_device.as_ref()) } pub async fn start(self) -> Error { let Self { name, socket_address: local_address, - bind_device, + bind_device_options, filter_chains, with_tls_inspector, proxy_protocol_config, mut route_updates_receiver, mut secret_updates_receiver, } = self; - let listener = match configure_and_start_tcp_listener(local_address, bind_device.as_ref()) { + let listener = 
match configure_and_start_tcp_listener(local_address, bind_device_options) { Ok(x) => x, Err(e) => return e, }; @@ -214,6 +216,35 @@ impl Listener { maybe_stream = listener.accept() => { match maybe_stream { Ok((stream, peer_addr)) => { + + let original_destination_address:Option = { + let raw_socket = stream.as_fd(); + if let Ok(s) = raw_socket.try_clone_to_owned(){ + let socket = socket2::Socket::from(s); + + let maybe_v4 = match (socket.original_dst_v4(), stream.local_addr()){ + (Ok(original), Ok(local)) => original.as_socket().and_then(|o| (o!=local).then_some(o)), + _ => None + + }; + + let maybe_v6 = match (socket.original_dst_v6(), stream.local_addr()){ + (Ok(original), Ok(local)) => original.as_socket().and_then(|o| (o!=local).then_some(o)), + _ => None + }; + match (maybe_v4, maybe_v6){ + (None, None) => None, + (None, Some(dst)) | (Some(dst), None) => Some(dst), + (Some(dst1), Some(_)) => Some(dst1), + } + }else{ + warn!("Unable to obtained a cloned fd for socket... "); + None + } + }; + + debug!("Original dst address {:?}", original_destination_address); + let start = std::time::Instant::now(); // This is a new downstream connection... @@ -230,7 +261,7 @@ impl Listener { // we could optimize a little here by either splitting up the filter_chain selection and rbac into the parts that can run // before we have the ClientHello and the ones after. since we might already have enough info to decide to drop the connection // or pick a specific filter_chain to run, or we could simply if-else on the with_tls_inspector variable. 
- tokio::spawn(Self::process_listener_update(name, filter_chains, with_tls_inspector, proxy_protocol_config, local_address, peer_addr, Box::new(stream), start)); + tokio::spawn(Self::process_listener_update(name, filter_chains, with_tls_inspector, proxy_protocol_config, local_address, peer_addr, original_destination_address, Box::new(stream), start)); }, Err(e) => {warn!("failed to accept tcp connection: {e}");} } @@ -261,9 +292,11 @@ impl Listener { filter_chains: &'a HashMap, downstream_metadata: &DownstreamConnectionMetadata, server_name: Option<&str>, + detected_transport_protocol: DetectedTransportProtocol, ) -> Result> { let source_addr = downstream_metadata.peer_address(); - let destination_addr = downstream_metadata.local_address(); + let destination_addr = downstream_metadata.original_destination_address(); + fn match_subitem<'a, F: Fn(&FilterChainMatch, T) -> MatchResult, T: Copy>( function: F, comparand: T, @@ -289,10 +322,10 @@ impl Listener { } } - //todo: smallvec? other optimization? 
let mut possible_filters = vec![true; filter_chains.len()]; let mut scratchpad = vec![MatchResult::NoRule; filter_chains.len()]; + debug!("Possible filters 1 {possible_filters:?}"); match_subitem( FilterChainMatch::matches_destination_port, destination_addr.port(), @@ -300,7 +333,7 @@ impl Listener { &mut scratchpad, &mut possible_filters, ); - + debug!("Possible filters 2 {possible_filters:?}"); match_subitem( FilterChainMatch::matches_destination_ip, destination_addr.ip(), @@ -308,7 +341,7 @@ impl Listener { &mut scratchpad, &mut possible_filters, ); - + debug!("Possible filters 3 {possible_filters:?}"); match_subitem( FilterChainMatch::matches_server_name, server_name.unwrap_or_default(), @@ -316,7 +349,15 @@ impl Listener { &mut scratchpad, &mut possible_filters, ); - + debug!("Possible filters 4 {possible_filters:?}"); + match_subitem( + FilterChainMatch::matches_detected_transport_protocol, + detected_transport_protocol, + filter_chains.keys(), + &mut scratchpad, + &mut possible_filters, + ); + debug!("Possible filters 5 {possible_filters:?}"); match_subitem( FilterChainMatch::matches_source_ip, source_addr.ip(), @@ -324,7 +365,7 @@ impl Listener { &mut scratchpad, &mut possible_filters, ); - + debug!("Possible filters 6 {possible_filters:?}"); match_subitem( FilterChainMatch::matches_source_port, source_addr.port(), @@ -332,7 +373,7 @@ impl Listener { &mut scratchpad, &mut possible_filters, ); - + debug!("Possible filters 7 {possible_filters:?}"); let mut possible_filters = possible_filters .into_iter() .zip(filter_chains.iter()) @@ -346,6 +387,8 @@ impl Listener { } } + #[allow(clippy::too_many_lines)] + #[allow(clippy::too_many_arguments)] async fn process_listener_update( listener_name: &'static str, filter_chains: Arc>, @@ -353,6 +396,7 @@ impl Listener { proxy_protocol_config: Option>, local_address: SocketAddr, peer_addr: SocketAddr, + original_destination_address: Option, mut stream: AsyncStream, start_instant: std::time::Instant, ) -> Result<()> { @@ 
-369,6 +413,7 @@ impl Listener { .unwrap_or(u64::MAX); with_histogram!(listeners::DOWNSTREAM_CX_LENGTH_MS, record, ms, &[KeyValue::new("listener", listener_name)]); } + let mut detected_transport_protocol = DetectedTransportProtocol::RawBuffer; let downstream_metadata = if let Some(config) = proxy_protocol_config.as_ref() { let reader = ProxyProtocolReader::new(Arc::clone(config)); @@ -376,7 +421,11 @@ impl Listener { stream = new_stream; metadata } else { - DownstreamConnectionMetadata::FromSocket { peer_address: peer_addr, local_address } + DownstreamConnectionMetadata::FromSocket { + peer_address: peer_addr, + local_address, + original_destination_address, + } }; let downstream_metadata = Arc::new(downstream_metadata); @@ -401,6 +450,7 @@ impl Listener { &[KeyValue::new("listener", listener_name)] ); ssl.store(true, Ordering::Relaxed); + detected_transport_protocol = DetectedTransportProtocol::Ssl; Some(sni) }, crate::transport::tls_inspector::InspectorResult::SuccessNoSni => { @@ -420,6 +470,7 @@ impl Listener { &[KeyValue::new("listener", listener_name)] ); ssl.store(true, Ordering::Relaxed); + detected_transport_protocol = DetectedTransportProtocol::Ssl; None }, crate::transport::tls_inspector::InspectorResult::TlsError(e) => { @@ -431,11 +482,17 @@ impl Listener { None }; - let selected_filterchain = - Self::select_filterchain(&filter_chains, &downstream_metadata, server_name.as_deref())?; + let selected_filterchain = Self::select_filterchain( + &filter_chains, + &downstream_metadata, + server_name.as_deref(), + detected_transport_protocol, + )?; if let Some(filterchain) = selected_filterchain { - debug!( - "{listener_name} : mapping connection from {peer_addr} to filter chain {}", + info!( + "{listener_name} : mapping connection from {peer_addr}->{} {:?} to filter chain {}", + downstream_metadata.local_address(), + server_name.as_deref(), filterchain.filter_chain().name ); if let Some(stream) = filterchain.apply_rbac(stream, &downstream_metadata, 
server_name.as_deref()) { @@ -526,20 +583,25 @@ impl Listener { } } -fn configure_and_start_tcp_listener(addr: SocketAddr, device: Option<&BindDevice>) -> Result { +fn configure_and_start_tcp_listener(addr: SocketAddr, bind_device_options: BindDeviceOptions) -> Result { let socket = if addr.is_ipv4() { TcpSocket::new_v4()? } else { TcpSocket::new_v6()? }; socket.set_reuseaddr(true)?; socket.set_keepalive(true)?; - if let Some(device) = device { - crate::transport::bind_device::bind_device(&socket, device)?; + if let Some(device) = bind_device_options.bind_device { + crate::transport::bind_device::bind_device(&socket, &device)?; } #[cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))] socket.set_reuseport(true)?; - socket.bind(addr)?; + if let Some(false) = bind_device_options.bind_to_port { + Ok(socket.listen(128)?) + } else { + socket.bind(addr)?; + Ok(socket.listen(128)?) + } - Ok(socket.listen(128)?) + } #[cfg(test)] @@ -596,7 +658,7 @@ socket_options: let l = PartialListener::try_from(ctx).unwrap(); let expected_bind_device = Some(BindDevice::from_str("virt1").unwrap()); - assert_eq!(&l.bind_device, &expected_bind_device); + assert_eq!(&l.bind_device_options.bind_device, &expected_bind_device); } #[test] @@ -612,6 +674,8 @@ socket_options: ], source_prefix_ranges: Vec::new(), source_ports: Vec::new(), + transport_protocol: String::new(), + application_protocols: vec![], }, 0, ), @@ -621,8 +685,10 @@ socket_options: let metadata = DownstreamConnectionMetadata::FromSocket { peer_address: (Ipv4Addr::LOCALHOST, 33000).into(), local_address: (Ipv4Addr::LOCALHOST, 8443).into(), + original_destination_address: None, }; - let selected = Listener::select_filterchain(&hashmap, &metadata, None).unwrap(); + let selected = + Listener::select_filterchain(&hashmap, &metadata, None, DetectedTransportProtocol::RawBuffer).unwrap(); assert_eq!(selected.copied(), Some(1)); } @@ -683,11 +749,60 @@ filter_chains: let metadata = 
DownstreamConnectionMetadata::FromSocket { peer_address: (Ipv4Addr::LOCALHOST, 3300).into(), local_address: (Ipv4Addr::LOCALHOST, 443).into(), + original_destination_address: None, }; let good_host = Some("host.test"); - assert!(matches!(Listener::select_filterchain(&m, &metadata, good_host), Ok(Some(())))); - assert!(matches!(Listener::select_filterchain(&m, &metadata, Some("a.wildcard")), Ok(Some(())))); - assert!(matches!(Listener::select_filterchain(&m, &metadata, None), Ok(None))); + assert!(matches!( + Listener::select_filterchain(&m, &metadata, good_host, DetectedTransportProtocol::RawBuffer), + Ok(Some(())) + )); + assert!(matches!( + Listener::select_filterchain(&m, &metadata, Some("a.wildcard"), DetectedTransportProtocol::RawBuffer), + Ok(Some(())) + )); + assert!(matches!( + Listener::select_filterchain(&m, &metadata, None, DetectedTransportProtocol::RawBuffer), + Ok(None) + )); + } + + #[traced_test] + #[test] + fn filter_chain_multiple_with_transport() { + let l: EnvoyListener = from_yaml( + " + name: listener + filter_chains: + - filter_chain_match: + destination_port: 443 + - filter_chain_match: + transport_protocol: raw_buffer + ", + ) + .unwrap(); + + let m = l + .filter_chains + .into_iter() + .enumerate() + .map(|(i, fc)| { + fc.filter_chain_match + .map(FilterChainMatchConfig::try_from) + .transpose() + .map(|x| (x.unwrap_or_default(), i)) + }) + .collect::, _>>() + .unwrap(); + + let metadata = DownstreamConnectionMetadata::FromSocket { + peer_address: (Ipv4Addr::LOCALHOST, 3300).into(), + local_address: (Ipv4Addr::LOCALHOST, 443).into(), + original_destination_address: None, + }; + assert!(matches!( + Listener::select_filterchain(&m, &metadata, None, DetectedTransportProtocol::RawBuffer), + Ok(Some(0)) + )); } #[test] @@ -723,24 +838,64 @@ filter_chains: let metadata = DownstreamConnectionMetadata::FromSocket { peer_address: (Ipv4Addr::LOCALHOST, 33000).into(), local_address: (Ipv4Addr::LOCALHOST, 8443).into(), + original_destination_address: 
None, }; - assert_eq!(Listener::select_filterchain(&m, &metadata, None).unwrap().copied(), Some(3)); assert_eq!( - Listener::select_filterchain(&m, &metadata, Some("this.is.more.specific")).unwrap().copied(), + Listener::select_filterchain(&m, &metadata, None, DetectedTransportProtocol::RawBuffer).unwrap().copied(), + Some(3) + ); + assert_eq!( + Listener::select_filterchain( + &m, + &metadata, + Some("this.is.more.specific"), + DetectedTransportProtocol::RawBuffer + ) + .unwrap() + .copied(), Some(0) ); assert_eq!( - Listener::select_filterchain(&m, &metadata, Some("not.this.is.more.specific")).unwrap().copied(), + Listener::select_filterchain( + &m, + &metadata, + Some("not.this.is.more.specific"), + DetectedTransportProtocol::RawBuffer + ) + .unwrap() + .copied(), + Some(1) + ); + assert_eq!( + Listener::select_filterchain(&m, &metadata, Some("is.more.specific"), DetectedTransportProtocol::RawBuffer) + .unwrap() + .copied(), Some(1) ); - assert_eq!(Listener::select_filterchain(&m, &metadata, Some("is.more.specific")).unwrap().copied(), Some(1)); - assert_eq!(Listener::select_filterchain(&m, &metadata, Some("more.specific")).unwrap().copied(), Some(2)); assert_eq!( - Listener::select_filterchain(&m, &metadata, Some("this.is.less.specific")).unwrap().copied(), + Listener::select_filterchain(&m, &metadata, Some("more.specific"), DetectedTransportProtocol::RawBuffer) + .unwrap() + .copied(), + Some(2) + ); + assert_eq!( + Listener::select_filterchain( + &m, + &metadata, + Some("this.is.less.specific"), + DetectedTransportProtocol::RawBuffer + ) + .unwrap() + .copied(), Some(2) ); - assert_eq!(Listener::select_filterchain(&m, &metadata, Some("hello.world")).unwrap().copied(), Some(3)); + assert_eq!( + Listener::select_filterchain(&m, &metadata, Some("hello.world"), DetectedTransportProtocol::RawBuffer) + .unwrap() + .copied(), + Some(3) + ); } } diff --git a/orion-lib/src/listeners/listeners_manager.rs b/orion-lib/src/listeners/listeners_manager.rs index 
1acf5a71..bc299e3a 100644 --- a/orion-lib/src/listeners/listeners_manager.rs +++ b/orion-lib/src/listeners/listeners_manager.rs @@ -161,7 +161,7 @@ mod tests { }; use super::*; - use orion_configuration::config::Listener as ListenerConfig; + use orion_configuration::config::{transport::BindDeviceOptions, Listener as ListenerConfig}; use tracing_test::traced_test; #[traced_test] @@ -181,7 +181,7 @@ mod tests { name: name.into(), address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), filter_chains: HashMap::default(), - bind_device: None, + bind_device_options: BindDeviceOptions::default(), with_tls_inspector: false, proxy_protocol_config: None, }; @@ -220,7 +220,7 @@ mod tests { name: name.into(), address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), filter_chains: HashMap::default(), - bind_device: None, + bind_device_options: BindDeviceOptions::default(), with_tls_inspector: false, proxy_protocol_config: None, }; diff --git a/orion-lib/src/listeners/tcp_proxy.rs b/orion-lib/src/listeners/tcp_proxy.rs index 2e94dbc8..d7bd362e 100644 --- a/orion-lib/src/listeners/tcp_proxy.rs +++ b/orion-lib/src/listeners/tcp_proxy.rs @@ -20,12 +20,13 @@ use crate::{ access_log::{log_access, log_access_reserve_balanced, Target}, - clusters::clusters_manager::{self, RoutingContext}, + clusters::{clusters_manager::{self, RoutingContext}}, listeners::{access_log::AccessLogContext, filter_state::DownstreamConnectionMetadata}, transport::connector::TcpErrorContext, AsyncStream, Result, }; use compact_str::ToCompactString; +use http::uri::Authority; use orion_configuration::config::{ cluster::ClusterSpecifier as ClusterSpecifierConfig, network_filters::{access_log::AccessLog, tcp_proxy::TcpProxy as TcpProxyConfig}, @@ -36,7 +37,7 @@ use orion_format::{ LogFormatterLocal, }; use std::{fmt, net::SocketAddr, sync::Arc, time::Instant}; -use tracing::{debug, error}; +use tracing::{debug, error, info}; #[derive(Debug, Clone)] pub struct TcpProxy { @@ -85,10 +86,19 @@ impl 
TcpProxy { access_loggers.with_context_fn(|| InitContext { start_time: std::time::SystemTime::now() }); + + + let cluster_selector = &self.cluster; + info!("Handling request TCP for {} {:?} {:?}", self.listener_name, cluster_selector, downstream_metadata); let cluster_id = clusters_manager::resolve_cluster(cluster_selector) .ok_or_else(|| "Failed to resolve cluster from specifier".to_owned())?; - let maybe_connector = clusters_manager::get_tcp_connection(cluster_id, RoutingContext::None); + + + let routing_context = RoutingContext::Authority(Authority::try_from(downstream_metadata.local_address().to_string())?, downstream_metadata.original_destination_address()); + + let maybe_connector = clusters_manager::get_tcp_connection(cluster_id, routing_context); + info!("Handling request TCP connector {maybe_connector:?}"); let mut bytes_received = 0; let mut bytes_sent = 0; diff --git a/orion-lib/src/secrets/secrets_manager.rs b/orion-lib/src/secrets/secrets_manager.rs index 231f46a6..d380de8e 100644 --- a/orion-lib/src/secrets/secrets_manager.rs +++ b/orion-lib/src/secrets/secrets_manager.rs @@ -51,23 +51,32 @@ impl TryFrom<&ValidationContext> for CertStore { type Error = crate::Error; fn try_from(validation_context: &ValidationContext) -> Result { - let mut ca_reader = validation_context.trusted_ca().into_buf_read()?; - let mut root_store = rustls::RootCertStore::empty(); - let ca_certs = certs(&mut ca_reader) - .map(|f| f.map_err(|e| format!("Can't parse certificate {e:?}").into())) - .collect::>>()?; - - if ca_certs.is_empty() { - return Err("No certificates have been configured".into()); - } + match validation_context{ + ValidationContext::TrustedCA(data_source) => { + let mut ca_reader = data_source.into_buf_read()?; + let mut root_store = rustls::RootCertStore::empty(); + let ca_certs = certs(&mut ca_reader) + .map(|f| f.map_err(|e| format!("Can't parse certificate {e:?}").into())) + .collect::>>()?; + + if ca_certs.is_empty() { + return Err("No certificates have 
been configured".into()); + } - let (good, bad) = root_store.add_parsable_certificates(ca_certs); - debug!("Added certs {good} rejected certs {bad}"); - if bad > 0 { - Err("Some certs in the trust store were invalid".into()) - } else { - Ok(CertStore { store: Arc::new(root_store), config: validation_context.clone() }) + let (good, bad) = root_store.add_parsable_certificates(ca_certs); + debug!("Added certs {good} rejected certs {bad}"); + if bad > 0 { + Err("Some certs in the trust store were invalid".into()) + } else { + Ok(CertStore { store: Arc::new(root_store), config: validation_context.clone() }) + } + }, + ValidationContext::None => { + let root_store = rustls::RootCertStore::empty(); + Ok(CertStore { store: Arc::new(root_store), config: validation_context.clone() }) + }, } + } } diff --git a/orion-lib/src/secrets/tls_configurator/configurator.rs b/orion-lib/src/secrets/tls_configurator/configurator.rs index c1dff452..96d65150 100644 --- a/orion-lib/src/secrets/tls_configurator/configurator.rs +++ b/orion-lib/src/secrets/tls_configurator/configurator.rs @@ -54,7 +54,7 @@ pub fn get_crypto_key_provider() -> Result<&'static dyn KeyProvider> { #[allow(dead_code)] #[derive(Debug)] -struct IgnoreCertVerifier(Verifier); +pub struct IgnoreCertVerifier(pub Verifier); impl ServerCertVerifier for IgnoreCertVerifier { fn verify_server_cert( diff --git a/orion-lib/src/secrets/tls_configurator/tls_configurator_builder.rs b/orion-lib/src/secrets/tls_configurator/tls_configurator_builder.rs index d0b29ee4..897a4397 100644 --- a/orion-lib/src/secrets/tls_configurator/tls_configurator_builder.rs +++ b/orion-lib/src/secrets/tls_configurator/tls_configurator_builder.rs @@ -25,6 +25,7 @@ use rustls::{ client::WebPkiServerVerifier, server::WebPkiClientVerifier, sign::CertifiedKey, ClientConfig, RootCertStore, ServerConfig, SupportedProtocolVersion, }; +use rustls_platform_verifier::Verifier; use tracing::{debug, warn}; use super::configurator::{get_crypto_key_provider, 
ClientCert, RelaxedResolvesServerCertUsingSni, ServerCert}; @@ -120,7 +121,7 @@ pub struct TlsContextBuilder { pub state: S, } -use crate::Result; +use crate::{secrets::tls_configurator::configurator::IgnoreCertVerifier, Result}; impl TlsContextBuilder<()> { pub fn with_supported_versions( @@ -244,16 +245,26 @@ impl TlsContextBuilder { impl TlsContextBuilder { pub fn build(&self) -> Result { let builder = ServerConfig::builder_with_protocol_versions(&self.state.supported_versions.clone()); - + warn!("TLS Server Context Builder {} {:?}",self.state.require_client_cert, self.state.certificate_store.as_ref().map(|s| s.len())); let verifier = match (self.state.require_client_cert, &self.state.certificate_store) { (true, None) => { return Err("requireClientCertificate is true but no validation_context is configured".into()); }, (true, Some(certificate_store)) => { - Some(WebPkiClientVerifier::builder(Arc::clone(certificate_store)).build()?) + warn!("Certificate store is empty and require_client_cert is true"); + if certificate_store.is_empty(){ + None + }else{ + Some(WebPkiClientVerifier::builder(Arc::clone(certificate_store)).build()?) + } }, (false, Some(certificate_store)) => { - Some(WebPkiClientVerifier::builder(Arc::clone(certificate_store)).allow_unauthenticated().build()?) + warn!("Certificate store is empty and require_client_cert is false"); + if certificate_store.is_empty(){ + None + }else{ + Some(WebPkiClientVerifier::builder(Arc::clone(certificate_store)).allow_unauthenticated().build()?) 
+ } }, (false, None) => None, }; @@ -309,9 +320,15 @@ impl TlsContextBuilder { impl TlsContextBuilder { pub fn build(&self) -> Result { let builder = ClientConfig::builder_with_protocol_versions(&self.state.supported_versions.clone()); - - let verifier = WebPkiServerVerifier::builder(Arc::clone(&self.state.certificate_store)).build()?; - let builder = builder.with_webpki_verifier(verifier); + warn!("TLS Client Context Builder {}",self.state.certificate_store.len()); + let builder = if self.state.certificate_store.is_empty(){ + let verifier = WebPkiServerVerifier::builder(Arc::clone(&self.state.certificate_store)).build()?; + builder.with_webpki_verifier(verifier) + }else{ + warn!("TLS Client Context Builder using dangerous configuration to ignore server certificates"); + builder.dangerous().with_custom_certificate_verifier(Arc::new(IgnoreCertVerifier(Verifier::new()))) + }; + if let Some(ClientCert { key, certs: auth_certs }) = self.state.client_certificate.as_deref() { debug!("UpstreamContext : Selected Client Cert"); diff --git a/orion-lib/src/transport/bind_device.rs b/orion-lib/src/transport/bind_device.rs index 7ccffb03..3d4abff1 100644 --- a/orion-lib/src/transport/bind_device.rs +++ b/orion-lib/src/transport/bind_device.rs @@ -23,7 +23,7 @@ pub use orion_configuration::config::transport::BindDevice; #[cfg(target_os = "linux")] pub(crate) fn bind_device(s: &tokio::net::TcpSocket, binddev: &BindDevice) -> std::io::Result<()> { let name = binddev.interface(); - tracing::trace!("binding socket to dev {:?}", name); + tracing::trace!("binding socket to dev {:?}", name); s.bind_device(Some(name.to_bytes_with_nul())) } diff --git a/orion-lib/src/transport/connector.rs b/orion-lib/src/transport/connector.rs index 11950f34..172e8cb0 100644 --- a/orion-lib/src/transport/connector.rs +++ b/orion-lib/src/transport/connector.rs @@ -30,16 +30,17 @@ use std::{ use http::uri::Authority; use hyper::Uri; use hyper_util::rt::TokioIo; +use 
orion_configuration::config::transport::BindDeviceOptions; use orion_error::{Context, WithContext}; use orion_format::types::ResponseFlags; use pingora_timeout::fast_timeout::fast_timeout; use tokio::net::{TcpSocket, TcpStream}; use tower::Service; -use tracing::debug; +use tracing::{debug, warn}; use crate::clusters::retry_policy::{elapsed, EventError}; -use super::{bind_device::BindDevice, resolve}; +use super::{resolve}; #[derive(Debug, thiserror::Error)] pub enum ConnectError { @@ -59,7 +60,7 @@ pub struct TcpErrorContext { pub struct LocalConnectorWithDNSResolver { pub addr: Authority, pub cluster_name: &'static str, - pub bind_device: Option, + pub bind_device_options: BindDeviceOptions, pub timeout: Option, } @@ -68,7 +69,8 @@ impl LocalConnectorWithDNSResolver { &self, ) -> impl Future>> + 'static { let addr = self.addr.clone(); - let device = self.bind_device.clone(); + let device = self.bind_device_options.bind_device.clone(); + let bind_address = self.bind_device_options.bind_address.clone(); let cluster_name = self.cluster_name; let connection_timeout = self.timeout; @@ -138,7 +140,38 @@ impl LocalConnectorWithDNSResolver { .map_into() })?; } - + + if let Some(bind_addr) = bind_address{ + match bind_addr.address(){ + orion_configuration::config::core::envoy_conversions::Address::Socket(bind_address, _) => { + let maybe_socket_addr = format!("{bind_address}:0").parse::(); + let bind_address = maybe_socket_addr.map_err(|e| EventError::ConnectFailure(io::Error::new(io::ErrorKind::AddrNotAvailable, e.to_string()))).map_err(|e| { + WithContext::new(e) + .with_context_data(TcpErrorContext { + upstream_addr: addr, + response_flags: ResponseFlags::LOCAL_RESET, + cluster_name, + }) + .map_into() + })?; + + let maybe_error = sock.bind(bind_address); + debug!("LocalConnectorWithDNSResolver socket bound to {bind_address} {maybe_error:?}"); + maybe_error.map_err(|e| EventError::ConnectFailure(io::Error::new(io::ErrorKind::AddrNotAvailable, 
e.to_string()))).map_err(|e| { + WithContext::new(e) + .with_context_data(TcpErrorContext { + upstream_addr: addr, + response_flags: ResponseFlags::LOCAL_RESET, + cluster_name, + }) + .map_into() + })? + }, + + orion_configuration::config::core::envoy_conversions::Address::Pipe(_, _) => (), + } + } + let stream = if let Some(connection_timeout) = connection_timeout { fast_timeout(connection_timeout, sock.connect(addr)) .await // Result>, Elapsed> diff --git a/orion-lib/src/transport/http_channel.rs b/orion-lib/src/transport/http_channel.rs index 10af4cf4..a38c57af 100644 --- a/orion-lib/src/transport/http_channel.rs +++ b/orion-lib/src/transport/http_channel.rs @@ -19,7 +19,6 @@ // use super::{ - bind_device::BindDevice, connector::LocalConnectorWithDNSResolver, policy::{RequestContext, RequestExt}, }; @@ -45,10 +44,12 @@ use hyper_util::{ client::legacy::{connect::Connect, Builder, Client}, rt::tokio::{TokioExecutor, TokioTimer}, }; +use hyperlocal::UnixConnector; use opentelemetry::KeyValue; use orion_configuration::config::{ cluster::http_protocol_options::{Codec, HttpProtocolOptions}, - network_filters::http_connection_manager::RetryPolicy, + core::envoy_conversions::Address, + network_filters::http_connection_manager::RetryPolicy, transport::BindDeviceOptions, }; use orion_format::types::{ResponseFlagsLong, ResponseFlagsShort}; use orion_metrics::{metrics::clusters, with_metric}; @@ -65,7 +66,7 @@ use std::{ thread::ThreadId, time::{Duration, Instant}, }; -use tracing::debug; +use tracing::{debug, info, warn}; use webpki::types::ServerName; #[cfg(feature = "metrics")] @@ -110,6 +111,7 @@ pub struct HttpChannel { pub enum HttpChannelClient { Plain(Arc, Builder, LocalConnectorWithDNSResolver>>), Tls(ClientContext), + Unix(hyper::Uri, Arc>>), } impl HttpChannelClient { @@ -121,8 +123,9 @@ impl HttpChannelClient { #[derive(Default)] pub struct HttpChannelBuilder { tls: Option>, + address: Option
, authority: Option, - bind_device: Option, + bind_device_options: BindDeviceOptions, server_name: Option>, http_protocol_options: HttpProtocolOptions, connection_timeout: Option, @@ -142,8 +145,8 @@ impl LocalBuilder, Arc) -> Self { - Self { bind_device, ..Default::default() } + pub fn new(bind_device_options: BindDeviceOptions) -> Self { + Self { bind_device_options, ..Default::default() } } pub fn with_tls(self, tls_configurator: Option>) -> Self { @@ -158,6 +161,10 @@ impl HttpChannelBuilder { Self { authority: Some(authority), ..self } } + pub fn with_address(self, address: Address) -> Self { + Self { address: Some(address), ..self } + } + pub fn with_cluster_name(self, cluster_name: &'static str) -> Self { Self { cluster_name: Some(cluster_name), ..self } } @@ -171,7 +178,59 @@ impl HttpChannelBuilder { } #[allow(clippy::cast_sign_loss)] - pub fn build(self) -> crate::Result { + pub fn build(self) -> crate::Result { + match self.address { + Some(Address::Socket(_, _)) => self.build_channel_from_authority(), + Some(Address::Pipe(_, _)) => self.build_channel_from_pipe(), + None => Err(Error::from("Address is mandatory")), + } + } + + #[allow(clippy::cast_sign_loss)] + pub fn build_with_no_address(self) -> crate::Result { + self.build_channel_from_authority() + + } + + fn configure_hyper_client(&self) -> Builder { + let mut client_builder = Client::builder(TokioExecutor::new()); + client_builder + .timer(TokioTimer::new()) + .pool_idle_timeout(self.http_protocol_options.common.idle_timeout.unwrap_or(DEFAULT_IDLE_TIMEOUT)) + .pool_timer(TokioTimer::new()) + .pool_max_idle_per_host(usize::MAX) + .set_host(false); + + let configured_upstream_http_version = self.http_protocol_options.codec; + + self.configure_http2_if_needed(&mut client_builder, configured_upstream_http_version); + + client_builder + } + + fn configure_http2_if_needed(&self, client_builder: &mut Builder, version: Codec) { + if matches!(version, Codec::Http2) { + client_builder.http2_only(true); + 
let http2_options = &self.http_protocol_options.http2_options; + + if let Some(settings) = &http2_options.keep_alive_settings { + client_builder.http2_keep_alive_interval(settings.keep_alive_interval); + if let Some(timeout) = settings.keep_alive_timeout { + client_builder.http2_keep_alive_timeout(timeout); + } + client_builder.http2_keep_alive_while_idle(true); + } + + client_builder.http2_initial_connection_window_size(http2_options.initial_connection_window_size()); + client_builder.http2_initial_stream_window_size(http2_options.initial_stream_window_size()); + + if let Some(max) = http2_options.max_concurrent_streams() { + client_builder.http2_max_concurrent_reset_streams(max); + } + } + } + + fn build_channel_from_authority(self) -> crate::Result { let authority = self.authority.clone().ok_or_else(|| Error::from("Authority is mandatory"))?; let client_builder = self.configure_hyper_client(); @@ -191,7 +250,7 @@ impl HttpChannelBuilder { let connector = LocalConnectorWithDNSResolver { addr: authority.clone(), cluster_name: self.cluster_name.unwrap_or_default(), - bind_device: self.bind_device, + bind_device_options: self.bind_device_options, timeout: self.connection_timeout, }; @@ -213,7 +272,7 @@ impl HttpChannelBuilder { // Build plain client inline let connector = LocalConnectorWithDNSResolver { addr: authority.clone(), - bind_device: self.bind_device, + bind_device_options: self.bind_device_options, timeout: self.connection_timeout, cluster_name: self.cluster_name.unwrap_or_default(), }; @@ -227,91 +286,94 @@ impl HttpChannelBuilder { } } - fn configure_hyper_client(&self) -> Builder { - let mut client_builder = Client::builder(TokioExecutor::new()); - client_builder - .timer(TokioTimer::new()) - .pool_idle_timeout(self.http_protocol_options.common.idle_timeout.unwrap_or(DEFAULT_IDLE_TIMEOUT)) - .pool_timer(TokioTimer::new()) - .pool_max_idle_per_host(usize::MAX) - .set_host(false); - - let configured_upstream_http_version = 
self.http_protocol_options.codec; - - self.configure_http2_if_needed(&mut client_builder, configured_upstream_http_version); - - client_builder - } - - fn configure_http2_if_needed(&self, client_builder: &mut Builder, version: Codec) { - if matches!(version, Codec::Http2) { - client_builder.http2_only(true); - let http2_options = &self.http_protocol_options.http2_options; - - if let Some(settings) = &http2_options.keep_alive_settings { - client_builder.http2_keep_alive_interval(settings.keep_alive_interval); - if let Some(timeout) = settings.keep_alive_timeout { - client_builder.http2_keep_alive_timeout(timeout); - } - client_builder.http2_keep_alive_while_idle(true); - } - - client_builder.http2_initial_connection_window_size(http2_options.initial_connection_window_size()); - client_builder.http2_initial_stream_window_size(http2_options.initial_stream_window_size()); - - if let Some(max) = http2_options.max_concurrent_streams() { - client_builder.http2_max_concurrent_reset_streams(max); - } + fn build_channel_from_pipe(self) -> crate::Result { + use hyperlocal::Uri; + + match &self.address { + Some(Address::Pipe(name, _)) => { + let _client_builder = self.configure_hyper_client(); + warn!("Building address from a pipe {name}"); + let uri: hyper::Uri = Uri::new(name.clone(), "").into(); + let authority = uri.authority().cloned().unwrap_or(Authority::from_static("none")); + warn!("Building address from a pipe {uri:?}"); + Ok(HttpChannel { + //client: HttpChannelClient::Unix(uri, Arc::new(client_builder.build(UnixConnector))), + client: HttpChannelClient::Unix( + uri, + Arc::new( + hyper_util::client::legacy::Client::builder(TokioExecutor::new()) + .http2_only(true) + .build(UnixConnector), + ), + ), + http_version: self.http_protocol_options.codec, + upstream_authority: authority, + cluster_name: self.cluster_name.unwrap_or_default(), + }) + }, + _ => Err(Error::from("Trying to build a pipe address from invalid address")), } } -} -#[cfg(feature = "metrics")] -fn 
update_upstream_stats(event: ConnectionEvent, key: &dyn Any, tag: &dyn Tag) { - use tracing::info; - let cluster_name = *(tag.as_any().downcast_ref::<&str>().unwrap_or(&"")); - let shard_id = std::thread::current().id(); - if let Some(pk) = key.downcast_ref::() { - info!("HttpClient: {:?} for cluster {:?} (pool_key: {:?})", event, cluster_name, pk); - } - match event { - ConnectionEvent::NewConnection => { - with_metric!(clusters::UPSTREAM_CX_TOTAL, add, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); - with_metric!(clusters::UPSTREAM_CX_ACTIVE, add, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); - }, - ConnectionEvent::IdleConnectionClosed => { - with_metric!(clusters::UPSTREAM_CX_DESTROY, add, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); - with_metric!( - clusters::UPSTREAM_CX_IDLE_TIMEOUT, - add, - 1, - shard_id, - &[KeyValue::new("cluster", cluster_name)] - ); - with_metric!(clusters::UPSTREAM_CX_ACTIVE, sub, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); - }, - ConnectionEvent::ConnectionError => { - with_metric!( - clusters::UPSTREAM_CX_CONNECT_FAIL, - add, - 1, - shard_id, - &[KeyValue::new("cluster", cluster_name)] - ); - }, - ConnectionEvent::ConnectionTimeout => { - with_metric!( - clusters::UPSTREAM_CX_CONNECT_TIMEOUT, - add, - 1, - shard_id, - &[KeyValue::new("cluster", cluster_name)] - ); - }, - ConnectionEvent::ConnectionClosed => { - with_metric!(clusters::UPSTREAM_CX_DESTROY, add, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); - with_metric!(clusters::UPSTREAM_CX_ACTIVE, sub, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); - }, + #[cfg(feature = "metrics")] + fn update_upstream_stats(event: ConnectionEvent, key: &dyn Any, tag: &dyn Tag) { + use tracing::info; + let cluster_name = *(tag.as_any().downcast_ref::<&str>().unwrap_or(&"")); + let shard_id = std::thread::current().id(); + if let Some(pk) = key.downcast_ref::() { + info!("HttpClient: {:?} for cluster {:?} (pool_key: {:?})", event, 
cluster_name, pk); + } + match event { + ConnectionEvent::NewConnection => { + with_metric!(clusters::UPSTREAM_CX_TOTAL, add, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); + with_metric!(clusters::UPSTREAM_CX_ACTIVE, add, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); + }, + ConnectionEvent::IdleConnectionClosed => { + with_metric!( + clusters::UPSTREAM_CX_DESTROY, + add, + 1, + shard_id, + &[KeyValue::new("cluster", cluster_name)] + ); + with_metric!( + clusters::UPSTREAM_CX_IDLE_TIMEOUT, + add, + 1, + shard_id, + &[KeyValue::new("cluster", cluster_name)] + ); + with_metric!(clusters::UPSTREAM_CX_ACTIVE, sub, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); + }, + ConnectionEvent::ConnectionError => { + with_metric!( + clusters::UPSTREAM_CX_CONNECT_FAIL, + add, + 1, + shard_id, + &[KeyValue::new("cluster", cluster_name)] + ); + }, + ConnectionEvent::ConnectionTimeout => { + with_metric!( + clusters::UPSTREAM_CX_CONNECT_TIMEOUT, + add, + 1, + shard_id, + &[KeyValue::new("cluster", cluster_name)] + ); + }, + ConnectionEvent::ConnectionClosed => { + with_metric!( + clusters::UPSTREAM_CX_DESTROY, + add, + 1, + shard_id, + &[KeyValue::new("cluster", cluster_name)] + ); + with_metric!(clusters::UPSTREAM_CX_ACTIVE, sub, 1, shard_id, &[KeyValue::new("cluster", cluster_name)]); + }, + } } } @@ -360,6 +422,26 @@ impl<'a> RequestHandler>>> for HttpChannel::handle_response(result, route_timeout, version) }, + HttpChannelClient::Unix(uri, sender) => { + let RequestContext { route_timeout, retry_policy } = request.ctx.clone(); + let client = sender; + let mut req = request.req; + let path_and_query = req.uri().path_and_query().cloned(); + info!("Using UNIX channel and rewriting uris {} {}", req.uri(), uri); + let mut parts = uri.clone().into_parts(); + parts.path_and_query = path_and_query; + *req.uri_mut() = Uri::from_parts(parts).expect("We do expect this to work"); + + let result = if let Some(t) = route_timeout { + match fast_timeout(t, 
self.send_request(retry_policy, client, req, cluster_name)).await { + Ok(result) => result, + Err(_) => (Err(EventError::RouteTimeout.into()), t), + } + } else { + self.send_request(retry_policy, client, req, cluster_name).await + }; + HttpChannel::handle_response(result, route_timeout, version) + }, } } } @@ -570,6 +652,7 @@ impl HttpChannel { pub fn is_https(&self) -> bool { match &self.client { HttpChannelClient::Plain(_) => false, + HttpChannelClient::Unix(_, _) => false, HttpChannelClient::Tls(_) => true, } } @@ -581,6 +664,7 @@ impl HttpChannel { pub fn load(&self) -> u32 { let load = match &self.client { HttpChannelClient::Plain(sender) => Arc::strong_count(sender.get_or_build()), + HttpChannelClient::Unix(_, sender) => Arc::strong_count(sender), HttpChannelClient::Tls(sender) => Arc::strong_count(sender.client.get_or_build()), }; u32::try_from(load).unwrap_or(u32::MAX) diff --git a/orion-lib/src/transport/proxy_protocol.rs b/orion-lib/src/transport/proxy_protocol.rs index 5a7b2031..e7377f66 100644 --- a/orion-lib/src/transport/proxy_protocol.rs +++ b/orion-lib/src/transport/proxy_protocol.rs @@ -87,7 +87,11 @@ impl ProxyProtocolReader { }, PolicyAction::TransparentPassthrough => { return Ok(( - DownstreamConnectionMetadata::FromSocket { peer_address, local_address }, + DownstreamConnectionMetadata::FromSocket { + peer_address, + local_address, + original_destination_address: None, + }, Box::new(stream.into_rewound_stream()), )); }, @@ -230,7 +234,11 @@ impl ProxyProtocolReader { SocketAddr::new(IpAddr::V6(ip.destination_address), ip.destination_port), ), v1::Addresses::Unknown => { - return Ok(DownstreamConnectionMetadata::FromSocket { peer_address, local_address }); + return Ok(DownstreamConnectionMetadata::FromSocket { + peer_address, + local_address, + original_destination_address: None, + }); }, }; Ok(DownstreamConnectionMetadata::FromProxyProtocol { @@ -259,7 +267,11 @@ impl ProxyProtocolReader { return Err(Error::new(format!("Unix socket addresses are 
not supported: {unix:?}"))); }, v2::Addresses::Unspecified => { - return Ok(DownstreamConnectionMetadata::FromSocket { peer_address, local_address }); + return Ok(DownstreamConnectionMetadata::FromSocket { + peer_address, + local_address, + original_destination_address: None, + }); }, }; let mut tlv_data = HashMap::new(); diff --git a/orion-lib/src/transport/resolver.rs b/orion-lib/src/transport/resolver.rs index c5d24419..0acb99d9 100644 --- a/orion-lib/src/transport/resolver.rs +++ b/orion-lib/src/transport/resolver.rs @@ -28,6 +28,9 @@ use hickory_resolver::{name_server::TokioConnectionProvider, TokioAsyncResolver} static GLOBAL_DNS_RESOLVER: OnceLock = OnceLock::new(); pub async fn resolve(host: &str, port: u16) -> io::Result { + if let Ok(addr) = (host.to_owned() + ":" + &port.to_string()).parse::() { + return Ok(addr); + } match GLOBAL_DNS_RESOLVER .get_or_init(|| -> TokioAsyncResolver { // The TokioAsyncResolver needs a Tokio runtime already running. By encapsulating the diff --git a/orion-lib/src/transport/tcp_channel.rs b/orion-lib/src/transport/tcp_channel.rs index 8c626f9b..422f03fc 100644 --- a/orion-lib/src/transport/tcp_channel.rs +++ b/orion-lib/src/transport/tcp_channel.rs @@ -21,7 +21,7 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use super::{ - bind_device::BindDevice, connector::LocalConnectorWithDNSResolver, AsyncStream, UpstreamTransportSocketConfigurator, + connector::LocalConnectorWithDNSResolver, AsyncStream, UpstreamTransportSocketConfigurator, }; use crate::{ listeners::filter_state::DownstreamConnectionMetadata, @@ -29,6 +29,7 @@ use crate::{ }; use futures::future::BoxFuture; use http::uri::Authority; +use orion_configuration::config::transport::BindDeviceOptions; use rustls::ClientConfig; use tokio::net::TcpStream; use tokio_rustls::TlsConnector; @@ -51,12 +52,12 @@ impl TcpChannelConnector { pub fn new( authority: &Authority, cluster_name: &'static str, - bind_device: Option, + bind_device_options: BindDeviceOptions, 
timeout: Option, transport_socket: UpstreamTransportSocketConfigurator, ) -> Self { Self { - connector: LocalConnectorWithDNSResolver { addr: authority.clone(), cluster_name, bind_device, timeout }, + connector: LocalConnectorWithDNSResolver { addr: authority.clone(), cluster_name, bind_device_options, timeout }, transport_socket, } } diff --git a/orion-metrics/src/lib.rs b/orion-metrics/src/lib.rs index 80f31eb8..a36f6dbb 100644 --- a/orion-metrics/src/lib.rs +++ b/orion-metrics/src/lib.rs @@ -22,7 +22,6 @@ pub mod macros; pub mod metrics; pub mod sharded; - use orion_configuration::config::{metrics::StatsSink, Bootstrap}; use serde::{Deserialize, Serialize}; diff --git a/orion-metrics/src/metrics/server.rs b/orion-metrics/src/metrics/server.rs index 89dbde35..3d498613 100644 --- a/orion-metrics/src/metrics/server.rs +++ b/orion-metrics/src/metrics/server.rs @@ -33,6 +33,7 @@ pub static MEMORY_HEAP_SIZE: OnceLock>> = OnceLock:: pub static MEMORY_PHYSICAL_SIZE: OnceLock>> = OnceLock::new(); pub static MEMORY_ALLOCATED: OnceLock>> = OnceLock::new(); + #[cfg(feature = "metrics")] const SERVER_PREFIX: &str = "orion.server"; @@ -124,7 +125,7 @@ pub(crate) fn init_server_metrics(number_of_threads: usize) { } #[cfg(feature = "metrics")] -mod util { +pub mod util { /// Return the physical memory allocated by the process. 
/// pub(crate) fn get_memory_physical_size() -> Option { @@ -163,7 +164,7 @@ mod util { } } - pub(crate) fn server_uptime() -> u64 { + pub fn server_uptime() -> u64 { use std::time::Instant; let start_up_time = super::STARTUP_TIME.get().copied().unwrap_or_else(Instant::now); start_up_time.elapsed().as_secs() diff --git a/orion-proxy/Cargo.toml b/orion-proxy/Cargo.toml index b6a5f382..108e154f 100644 --- a/orion-proxy/Cargo.toml +++ b/orion-proxy/Cargo.toml @@ -11,6 +11,7 @@ name = "orion" path = "src/main.rs" [features] +default = ["prometheus", "config-dump"] config-dump = [] dhat-heap = ["dep:dhat"] jemalloc = ["dep:tikv-jemallocator", "orion-metrics/jemalloc"] diff --git a/orion-proxy/conf/envoy-rev.json b/orion-proxy/conf/envoy-rev.json new file mode 100644 index 00000000..b2bec842 --- /dev/null +++ b/orion-proxy/conf/envoy-rev.json @@ -0,0 +1,441 @@ +{ + "application_log_config": { + "log_format": { + "text_format": "%Y-%m-%dT%T.%fZ\t%l\tenvoy %n %g:%#\t%v\tthread=%t" + } + }, + "node": { + "id": "sidecar~10.244.0.19~details-v1-766844796b-2llwc.default~default.svc.cluster.local", + "cluster": "details.default", + "locality": { + }, + "metadata": 
{"ANNOTATIONS":{"istio.io/rev":"default","kubectl.kubernetes.io/default-container":"details","kubectl.kubernetes.io/default-logs-container":"details","kubernetes.io/config.seen":"2025-09-09T09:16:26.431061397Z","kubernetes.io/config.source":"api","prometheus.io/path":"/stats/prometheus","prometheus.io/port":"15020","prometheus.io/scrape":"true","sidecar.istio.io/status":"{\"initContainers\":[\"istio-init\",\"istio-proxy\"],\"containers\":null,\"volumes\":[\"workload-socket\",\"credential-socket\",\"workload-certs\",\"istio-envoy\",\"istio-data\",\"istio-podinfo\",\"istio-token\",\"istiod-ca-cert\",\"istio-ca-crl\"],\"imagePullSecrets\":null,\"revision\":\"default\"}"},"APP_CONTAINERS":"details","CLUSTER_ID":"Kubernetes","ENVOY_PROMETHEUS_PORT":15090,"ENVOY_SKIP_DEPRECATED_LOGS":"true","ENVOY_STATUS_PORT":15021,"INSTANCE_IPS":"10.244.0.19","INTERCEPTION_MODE":"REDIRECT","ISTIO_PROXY_SHA":"dd6dd2104dc107fd5f5da434f1a0424ec1099943","ISTIO_VERSION":"1.28-dev","LABELS":{"app":"details","security.istio.io/tlsMode":"istio","service.istio.io/canonical-name":"details","service.istio.io/canonical-revision":"v1","version":"v1"},"MESH_ID":"cluster.local","METADATA_DISCOVERY":"false","NAME":"details-v1-766844796b-2llwc","NAMESPACE":"default","NODE_NAME":"envoy-gateway-control-plane","OWNER":"kubernetes://apis/apps/v1/namespaces/default/deployments/details-v1","PILOT_SAN":["istiod.istio-system.svc"],"POD_PORTS":"[{\"containerPort\":9080,\"protocol\":\"TCP\"}]","PROXY_CONFIG":{"binaryPath":"/usr/local/bin/orion","concurrency":2,"configPath":"./etc/istio/proxy","controlPlaneAuthPolicy":"MUTUAL_TLS","discoveryAddress":"istiod.istio-system.svc:15012","drainDuration":"45s","proxyAdminPort":15000,"serviceCluster":"istio-proxy","statNameLength":189,"statusPort":15020,"terminationDrainDuration":"5s"},"SERVICE_ACCOUNT":"bookinfo-details","WORKLOAD_IDENTITY_SOCKET_FILE":"socket","WORKLOAD_NAME":"details-v1"} + }, + "layered_runtime": { + "layers": [ + { + "name": "global config", + 
"static_layer": {"envoy.deprecated_features:envoy.config.listener.v3.Listener.hidden_envoy_deprecated_use_original_dst":true,"envoy.reloadable_features.http_reject_path_with_fragment":false,"overload.global_downstream_max_connections":"2147483647","re2.max_program_size.error_level":"32768"} + }, + { + "name": "admin", + "admin_layer": {} + } + ] + }, + "bootstrap_extensions": [ + { + "name": "envoy.bootstrap.internal_listener", + "typed_config": { + "@type":"type.googleapis.com/udpa.type.v1.TypedStruct", + "type_url": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener", + "value": { + "buffer_size_kb": 64 + } + } + } + ], + "stats_config": { + "use_all_default_tags": false, + "stats_tags": [ + { + "tag_name": "cluster_name", + "regex": "^cluster(\\.(.+);)" + }, + { + "tag_name": "http_conn_manager_prefix", + "regex": "^http\\.(((?:[_.[:digit:]\\w]*|[_\\[\\]aAbBcCdDeEfF[:digit:]\\w\\:]*));\\.)" + }, + { + "tag_name": "thread_name", + "regex": "^server(\\.(.+))\\.watchdog" + }, + { + "tag_name": "tcp_prefix", + "regex": "^tcp\\.((.*?)\\.)\\w+?$" + }, + { + "regex": "_rq(_(\\d{3}))$", + "tag_name": "response_code" + }, + { + "tag_name": "response_code_class", + "regex": "_rq(_(\\dxx))$" + }, + { + "tag_name": "http_conn_manager_listener_prefix", + "regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "listener_address", + "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "mongo_prefix", + "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$" + }, + { + "regex": "(cache\\.(.+?)\\.)", + "tag_name": "cache" + }, + { + "regex": "(component\\.(.+?)\\.)", + "tag_name": "component" + }, + { + "regex": "(tag\\.(.+?);\\.)", + "tag_name": "tag" + }, + { + "regex": "(wasm_filter\\.(.+?)\\.)", + "tag_name": "wasm_filter" + }, + { + "tag_name": "authz_enforce_result", + "regex": 
"rbac(\\.(allowed|denied))" + }, + { + "tag_name": "authz_dry_run_action", + "regex": "(\\.istio_dry_run_(allow|deny)_)" + }, + { + "tag_name": "authz_dry_run_result", + "regex": "(\\.shadow_(allowed|denied))" + } + ], + "stats_matcher": { + "inclusion_list": { + "patterns": [ + { + "prefix": "reporter=" + }, + { + "prefix": "cluster_manager" + }, + { + "prefix": "listener_manager" + }, + { + "prefix": "server" + }, + { + "prefix": "cluster.xds-grpc" + }, + { + "prefix": "wasm" + }, + { + "suffix": "rbac.allowed" + }, + { + "suffix": "rbac.denied" + }, + { + "suffix": "shadow_allowed" + }, + { + "suffix": "shadow_denied" + }, + { + "safe_regex": {"regex":"vhost\\..*\\.route\\..*"} + }, + { + "prefix": "component" + }, + { + "prefix": "istio" + } + ] + } + } + }, + "admin": { + "access_log": [ + { + "name": "envoy.access_loggers.file", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog", + "path": "/dev/null" + } + } + ], + "profile_path": "/var/lib/istio/data/envoy.prof", + "address": { + "socket_address": { + "address": "127.0.0.1", + "port_value": 15000 + } + } + }, + "dynamic_resources": { + "lds_config": { + "ads": {}, + "initial_fetch_timeout": "0s", + "resource_api_version": "V3" + }, + "cds_config": { + "ads": {}, + "initial_fetch_timeout": "0s", + "resource_api_version": "V3" + }, + "ads_config": { + "api_type": "DELTA_GRPC", + "set_node_on_first_message_only": true, + "transport_api_version": "V3", + "grpc_services": [ + { + "envoy_grpc": { + "cluster_name": "xds-grpc" + } + } + ] + } + }, + "static_resources": { + "clusters": [ + { + "name": "prometheus_stats", + "alt_stat_name": "prometheus_stats;", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", + "load_assignment": { + "cluster_name": "prometheus_stats", + "endpoints": [{ + "lb_endpoints": [{ + "endpoint": { + "address":{ + "socket_address": { + "protocol": "TCP", + "address": "127.0.0.1", + "port_value": 15000 + } 
+ } + } + }] + }] + } + }, + { + "name": "agent", + "alt_stat_name": "agent;", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", + "load_assignment": { + "cluster_name": "agent", + "endpoints": [{ + "lb_endpoints": [{ + "endpoint": { + "address":{ + "socket_address": { + "protocol": "TCP", + "address": "127.0.0.1", + "port_value": 15020 + } + } + } + }] + }] + } + }, + { + "name": "sds-grpc", + "alt_stat_name": "sds-grpc;", + "type": "STATIC", + "typed_extension_protocol_options": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "explicit_http_config": { + "http2_protocol_options": {} + } + } + }, + "connect_timeout": "1s", + "lb_policy": "ROUND_ROBIN", + "load_assignment": { + "cluster_name": "sds-grpc", + "endpoints": [{ + "lb_endpoints": [{ + "endpoint": { + "address":{ + "pipe": { + "path": "./var/run/secrets/workload-spiffe-uds/socket" + } + } + } + }] + }] + } + }, + { + "name": "xds-grpc", + "alt_stat_name": "xds-grpc;", + "type" : "STATIC", + "connect_timeout": "1s", + "lb_policy": "ROUND_ROBIN", + "load_assignment": { + "cluster_name": "xds-grpc", + "endpoints": [{ + "lb_endpoints": [{ + "endpoint": { + "address":{ + "pipe": { + "path": "./etc/istio/proxy/XDS" + } + } + } + }] + }] + }, + "circuit_breakers": { + "thresholds": [ + { + "priority": "DEFAULT", + "max_connections": 100000, + "max_pending_requests": 100000, + "max_requests": 100000 + }, + { + "priority": "HIGH", + "max_connections": 100000, + "max_pending_requests": 100000, + "max_requests": 100000 + } + ] + }, + "upstream_connection_options": { + "tcp_keepalive": { + "keepalive_time": 300 + } + }, + "max_requests_per_connection": 1, + "typed_extension_protocol_options": { + "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": { + "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions", + "explicit_http_config": { + 
"http2_protocol_options": {} + } + } + } + } + + + ], + "listeners":[ + { + "name": "0.0.0.0_15090", + + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + + "port_value": 15090 + } + }, + "ignore_global_conn_limit": true, + "bypass_overload_manager": true, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "codec_type": "AUTO", + "stat_prefix": "stats", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/stats/prometheus" + }, + "route": { + "cluster": "prometheus_stats" + } + } + ] + } + ] + }, + "http_filters": [ + { + "name": "envoy.filters.http.router", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + }] + } + } + ] + } + ] + }, + { + "name": "0.0.0.0_15021", + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + "port_value": 15021 + } + }, + "ignore_global_conn_limit": true, + "bypass_overload_manager": true, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "codec_type": "AUTO", + "stat_prefix": "agent", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/healthz/ready" + }, + "route": { + "cluster": "agent" + } + } + ] + } + ] + }, + "http_filters": [{ + "name": "envoy.filters.http.router", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + }] + } + } + ] + } + ] + } + ] + } + + + , + "deferred_stat_options": { + "enable_deferred_creation_stats": 
true + } +} diff --git a/orion-proxy/conf/envoy-rev.yaml b/orion-proxy/conf/envoy-rev.yaml new file mode 100644 index 00000000..895f80f0 --- /dev/null +++ b/orion-proxy/conf/envoy-rev.yaml @@ -0,0 +1,307 @@ +application_log_config: + log_format: + text_format: "%Y-%m-%dT%T.%fZ\t%l\tenvoy %n %g:%#\t%v\tthread=%t" +node: + id: >- + sidecar~10.244.0.19~details-v1-766844796b-2llwc.default~default.svc.cluster.local + cluster: details.default + locality: {} + metadata: + ANNOTATIONS: + istio.io/rev: default + kubectl.kubernetes.io/default-container: details + kubectl.kubernetes.io/default-logs-container: details + kubernetes.io/config.seen: '2025-09-09T09:16:26.431061397Z' + kubernetes.io/config.source: api + prometheus.io/path: /stats/prometheus + prometheus.io/port: '15020' + prometheus.io/scrape: 'true' + sidecar.istio.io/status: >- + {"initContainers":["istio-init","istio-proxy"],"containers":null,"volumes":["workload-socket","credential-socket","workload-certs","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert","istio-ca-crl"],"imagePullSecrets":null,"revision":"default"} + APP_CONTAINERS: details + CLUSTER_ID: Kubernetes + ENVOY_PROMETHEUS_PORT: 15090 + ENVOY_SKIP_DEPRECATED_LOGS: 'true' + ENVOY_STATUS_PORT: 15021 + INSTANCE_IPS: 10.244.0.19 + INTERCEPTION_MODE: REDIRECT + ISTIO_PROXY_SHA: dd6dd2104dc107fd5f5da434f1a0424ec1099943 + ISTIO_VERSION: 1.28-dev + LABELS: + app: details + security.istio.io/tlsMode: istio + service.istio.io/canonical-name: details + service.istio.io/canonical-revision: v1 + version: v1 + MESH_ID: cluster.local + METADATA_DISCOVERY: 'false' + NAME: details-v1-766844796b-2llwc + NAMESPACE: default + NODE_NAME: envoy-gateway-control-plane + OWNER: 'kubernetes://apis/apps/v1/namespaces/default/deployments/details-v1' + PILOT_SAN: + - istiod.istio-system.svc + POD_PORTS: '[{"containerPort":9080,"protocol":"TCP"}]' + PROXY_CONFIG: + binaryPath: /usr/local/bin/orion + concurrency: 2 + configPath: ./etc/istio/proxy + 
controlPlaneAuthPolicy: MUTUAL_TLS + discoveryAddress: 'istiod.istio-system.svc:15012' + drainDuration: 45s + proxyAdminPort: 15000 + serviceCluster: istio-proxy + statNameLength: 189 + statusPort: 15020 + terminationDrainDuration: 5s + SERVICE_ACCOUNT: bookinfo-details + WORKLOAD_IDENTITY_SOCKET_FILE: socket + WORKLOAD_NAME: details-v1 +layered_runtime: + layers: + - name: global config + static_layer: + 'envoy.deprecated_features:envoy.config.listener.v3.Listener.hidden_envoy_deprecated_use_original_dst': true + envoy.reloadable_features.http_reject_path_with_fragment: false + overload.global_downstream_max_connections: '2147483647' + re2.max_program_size.error_level: '32768' + - name: admin + admin_layer: {} +bootstrap_extensions: + - name: envoy.bootstrap.internal_listener + typed_config: + '@type': type.googleapis.com/udpa.type.v1.TypedStruct + type_url: >- + type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener + value: + buffer_size_kb: 64 +stats_config: + use_all_default_tags: false + stats_tags: + - tag_name: cluster_name + regex: ^cluster(\.(.+);) + - tag_name: http_conn_manager_prefix + regex: '^http\.(((?:[_.[:digit:]\w]*|[_\[\]aAbBcCdDeEfF[:digit:]\w\:]*));\.)' + - tag_name: thread_name + regex: ^server(\.(.+))\.watchdog + - tag_name: tcp_prefix + regex: ^tcp\.((.*?)\.)\w+?$ + - regex: '_rq(_(\d{3}))$' + tag_name: response_code + - tag_name: response_code_class + regex: _rq(_(\dxx))$ + - tag_name: http_conn_manager_listener_prefix + regex: >- + ^listener(?=\.).*?\.http\.(((?:[_.[:digit:]]*|[_\[\]aAbBcCdDeEfF[:digit:]]*))\.) + - tag_name: listener_address + regex: '^listener\.(((?:[_.[:digit:]]*|[_\[\]aAbBcCdDeEfF[:digit:]]*))\.)' + - tag_name: mongo_prefix + regex: ^mongo\.(.+?)\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$ + - regex: (cache\.(.+?)\.) + tag_name: cache + - regex: (component\.(.+?)\.) + tag_name: component + - regex: (tag\.(.+?);\.) + tag_name: tag + - regex: (wasm_filter\.(.+?)\.) 
+ tag_name: wasm_filter + - tag_name: authz_enforce_result + regex: rbac(\.(allowed|denied)) + - tag_name: authz_dry_run_action + regex: (\.istio_dry_run_(allow|deny)_) + - tag_name: authz_dry_run_result + regex: (\.shadow_(allowed|denied)) + stats_matcher: + inclusion_list: + patterns: + - prefix: reporter= + - prefix: cluster_manager + - prefix: listener_manager + - prefix: server + - prefix: cluster.xds-grpc + - prefix: wasm + - suffix: rbac.allowed + - suffix: rbac.denied + - suffix: shadow_allowed + - suffix: shadow_denied + - safe_regex: + regex: vhost\..*\.route\..* + - prefix: component + - prefix: istio +admin: + access_log: + - name: envoy.access_loggers.file + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/null + profile_path: /var/lib/istio/data/envoy.prof + address: + socket_address: + address: 127.0.0.1 + port_value: 15000 +dynamic_resources: + lds_config: + ads: {} + initial_fetch_timeout: 0s + resource_api_version: V3 + cds_config: + ads: {} + initial_fetch_timeout: 0s + resource_api_version: V3 + ads_config: + api_type: DELTA_GRPC + set_node_on_first_message_only: true + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: xds-grpc +static_resources: + clusters: + - name: prometheus_stats + alt_stat_name: prometheus_stats; + type: STATIC + connect_timeout: 0.250s + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: prometheus_stats + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 15000 + - name: agent + alt_stat_name: agent; + type: STATIC + connect_timeout: 0.250s + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: agent + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 15020 + - name: sds-grpc + alt_stat_name: sds-grpc; + type: STATIC + typed_extension_protocol_options: + 
envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': >- + type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + connect_timeout: 1s + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: sds-grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + pipe: + path: ./var/run/secrets/workload-spiffe-uds/socket + - name: xds-grpc + alt_stat_name: xds-grpc; + type: STATIC + connect_timeout: 1s + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: xds-grpc + endpoints: + - lb_endpoints: + - endpoint: + address: + pipe: + path: ./etc/istio/proxy/XDS + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 100000 + max_pending_requests: 100000 + max_requests: 100000 + - priority: HIGH + max_connections: 100000 + max_pending_requests: 100000 + max_requests: 100000 + upstream_connection_options: + tcp_keepalive: + keepalive_time: 300 + max_requests_per_connection: 1 + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': >- + type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + listeners: + - name: 0.0.0.0_15090 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 15090 + ignore_global_conn_limit: true + bypass_overload_manager: true + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + stat_prefix: stats + route_config: + virtual_hosts: + - name: backend + domains: + - '*' + routes: + - match: + prefix: /stats/prometheus + route: + cluster: prometheus_stats + http_filters: + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + - name: 
0.0.0.0_15021 + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 15021 + ignore_global_conn_limit: true + bypass_overload_manager: true + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + stat_prefix: agent + route_config: + virtual_hosts: + - name: backend + domains: + - '*' + routes: + - match: + prefix: /healthz/ready + route: + cluster: agent + http_filters: + - name: envoy.filters.http.router + typed_config: + '@type': >- + type.googleapis.com/envoy.extensions.filters.http.router.v3.Router +deferred_stat_options: + enable_deferred_creation_stats: true diff --git a/orion-proxy/conf/envoy-runtime-pipe.yaml b/orion-proxy/conf/envoy-runtime-pipe.yaml new file mode 100644 index 00000000..b6a4fcc3 --- /dev/null +++ b/orion-proxy/conf/envoy-runtime-pipe.yaml @@ -0,0 +1,86 @@ + +static_resources: + listeners: + - name: listener_http + address: + socket_address: { address: 0.0.0.0, port_value: 8000 } + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1 + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + routeConfig: + name: basic_http_route + virtual_hosts: + - name: backend + domains: ["*"] + routes: + - match: + prefix: "/direct-response" + headers: + - name: ":method" + exactMatch: "GET" + direct_response: + status: 200 + body: + inline_string: "meow! 
🐱" + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "GET" + route: + cluster: cluster_http_1 + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "POST" + route: + cluster: cluster_http_2 + + clusters: + - name: cluster_http_1 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: cluster_http_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock + + + - name: cluster_http_2 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: cluster_http_2 + endpoints: + - lb_endpoints: + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock diff --git a/orion-proxy/conf/orion-conf.yaml b/orion-proxy/conf/orion-conf.yaml new file mode 100644 index 00000000..61590503 --- /dev/null +++ b/orion-proxy/conf/orion-conf.yaml @@ -0,0 +1,11 @@ +runtime: + num_cpus: 2 + num_runtimes: 2 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + +logging: + log_level: "debug" + # log_directory: "." + # log_file: "orion.log" diff --git a/orion-proxy/conf/orion-runtime-bare.yaml b/orion-proxy/conf/orion-runtime-bare.yaml new file mode 100644 index 00000000..61590503 --- /dev/null +++ b/orion-proxy/conf/orion-runtime-bare.yaml @@ -0,0 +1,11 @@ +runtime: + num_cpus: 2 + num_runtimes: 2 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + +logging: + log_level: "debug" + # log_directory: "." 
+ # log_file: "orion.log" diff --git a/orion-proxy/conf/orion-runtime-pipe.yaml b/orion-proxy/conf/orion-runtime-pipe.yaml new file mode 100644 index 00000000..ee62a722 --- /dev/null +++ b/orion-proxy/conf/orion-runtime-pipe.yaml @@ -0,0 +1,98 @@ +runtime: + num_cpus: 2 + num_runtimes: 2 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + +logging: + log_level: "trace" +# log_directory: "." +# log_file: "orion.log" + +envoy_bootstrap: + static_resources: + listeners: + - name: listener_http + address: + socket_address: { address: 0.0.0.0, port_value: 8000 } + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1 + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + routeConfig: + name: basic_http_route + virtual_hosts: + - name: backend + domains: ["*"] + routes: + - match: + prefix: "/direct-response" + headers: + - name: ":method" + exactMatch: "GET" + direct_response: + status: 200 + body: + inline_string: "meow! 
🐱" + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "GET" + route: + cluster: cluster_http_1 + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "POST" + route: + cluster: cluster_http_2 + + clusters: + - name: cluster_http_1 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: cluster_http_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock + + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock + + - name: cluster_http_2 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: cluster_http_2 + endpoints: + - lb_endpoints: + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock + - endpoint: + address: + pipe: + path: /tmp/orion_pipe1.sock diff --git a/orion-proxy/src/admin.rs b/orion-proxy/src/admin.rs index 3bc2951c..2166dcb1 100644 --- a/orion-proxy/src/admin.rs +++ b/orion-proxy/src/admin.rs @@ -72,10 +72,12 @@ fn build_admin_router(admin_state: AdminState) -> Router { } #[cfg(feature = "prometheus")] { - use crate::admin::prometheus::prometheus_handler; + use crate::admin::prometheus::{prometheus_handler, stats_handler}; router = router.route("/stats/prometheus", get(prometheus_handler)); + router = router.route("/stats", get(stats_handler)); } + router = router.route("/ready", get(get_ready)); router.with_state(admin_state) diff --git a/orion-proxy/src/admin/config_dump.rs b/orion-proxy/src/admin/config_dump.rs index 5f12ce88..c48f40a2 100644 --- a/orion-proxy/src/admin/config_dump.rs +++ b/orion-proxy/src/admin/config_dump.rs @@ -90,9 +90,9 @@ pub async fn get_config_dump(State(admin_state): State) -> Json load_assignment.endpoints.clone(), - ClusterDiscoveryType::Eds(None) | ClusterDiscoveryType::OriginalDst(_) => vec![], + ClusterDiscoveryType::Eds(None, _) | ClusterDiscoveryType::OriginalDst(_) => vec![], }) .collect(); config.endpoints = 
(!endpoints.is_empty()).then_some(endpoints); @@ -112,6 +112,7 @@ mod config_dump_tests { core::DataSource, network_filters::http_connection_manager::header_modifer::HeaderModifier, secret::{Secret, TlsCertificate, Type, ValidationContext}, + transport::BindDeviceOptions, Bootstrap, Listener, }; use orion_lib::{ConfigDump, ListenerConfigurationChange}; @@ -188,8 +189,13 @@ mod config_dump_tests { }; let redacted = redact_secrets(vec![secret.clone()]); match &redacted[0].kind { - Type::ValidationContext(vc) => { - assert_eq!(vc.trusted_ca(), &DataSource::InlineString("ca_data".into())); + Type::ValidationContext(vc) => match vc { + ValidationContext::TrustedCA(data_source) => { + assert_eq!(data_source, &DataSource::InlineString("ca_data".into())); + }, + ValidationContext::None => { + assert!(false) + }, }, Type::TlsCertificate(_) => unreachable!(), } @@ -304,7 +310,7 @@ mod config_dump_tests { ); map }, - bind_device: None, + bind_device_options: BindDeviceOptions::default(), proxy_protocol_config: None, with_tls_inspector: false, }; @@ -341,6 +347,7 @@ mod config_dump_tests { let cluster = Cluster { name: CompactString::from("cluster1"), discovery_settings: ClusterDiscoveryType::Static(ClusterLoadAssignment { + cluster_name: "kdjfk".to_owned(), endpoints: vec![LocalityLbEndpoints { priority: 0, lb_endpoints: vec![LbEndpoint { @@ -351,7 +358,7 @@ mod config_dump_tests { }], }), transport_socket: None, - bind_device: None, + bind_device_options: orion_configuration::config::transport::BindDeviceOptions::default(), load_balancing_policy: LbPolicy::default(), http_protocol_options: HttpProtocolOptions::default(), health_check: None, @@ -394,6 +401,7 @@ mod config_dump_tests { let cluster = Cluster { name: CompactString::from("cluster1"), discovery_settings: ClusterDiscoveryType::Static(ClusterLoadAssignment { + cluster_name: "kdjfk".to_owned(), endpoints: vec![LocalityLbEndpoints { priority: 0, lb_endpoints: vec![LbEndpoint { @@ -404,7 +412,7 @@ mod config_dump_tests 
{ }], }), transport_socket: None, - bind_device: None, + bind_device_options: BindDeviceOptions::default(), load_balancing_policy: LbPolicy::default(), http_protocol_options: HttpProtocolOptions::default(), health_check: None, diff --git a/orion-proxy/src/admin/prometheus.rs b/orion-proxy/src/admin/prometheus.rs index 66cf4931..3d47453f 100644 --- a/orion-proxy/src/admin/prometheus.rs +++ b/orion-proxy/src/admin/prometheus.rs @@ -21,18 +21,19 @@ use std::collections::{HashMap, HashSet}; //use ::http::{header, StatusCode}; -use axum::extract::State; +use axum::extract::{Query, State}; use orion_metrics::{ metrics::{clusters, http, listeners, server, server::update_server_metrics, tcp, tls}, sharded::ShardedU64, }; use prometheus::{Encoder, IntCounterVec, IntGaugeVec, Opts, Registry, TextEncoder}; +use serde::Deserialize; use crate::admin::AdminState; -use ::http::{header::HeaderMap, StatusCode}; +use ::http::{header::HeaderMap, HeaderValue, StatusCode}; use opentelemetry::KeyValue; use std::hash::Hash; -use tracing::warn; +use tracing::{debug, warn}; /// Populates a Prometheus `IntCounterVec` by reading from a `ShardedU64`. 
fn populate_counter_vec( @@ -180,3 +181,33 @@ pub(crate) async fn prometheus_handler( Ok((headers, body)) } + +#[derive(Debug, Deserialize)] +pub(crate) struct QueryParams { + usedonly: Option, +} + +use std::fmt::Write; +pub(crate) async fn stats_handler( + Query(params): Query, + State(_state): State, +) -> Result<(HeaderMap, String), (StatusCode, String)> { + let mut response = String::new(); + debug!("Query params {params:?}"); + if params.usedonly.is_some() { + let _ = writeln!(&mut response, "server.uptime: {}", orion_metrics::metrics::server::util::server_uptime()); + let _ = writeln!(&mut response, "server.state: {}\n", 0); + let _ = writeln!(&mut response, "listener_manager.workers_started: {}", 1); + let _ = writeln!(&mut response, "cluster_manager.cds.update_success: {}", 10); + let _ = writeln!(&mut response, "cluster_manager.cds.update_rejected: {}", 0); + let _ = writeln!(&mut response, "listener_manager.lds.update_success: {}", 10); + let _ = writeln!(&mut response, "listener_manager.lds.update_rejected: {}", 0); + + + let mut headers = HeaderMap::new(); + headers.insert(::http::header::CONTENT_TYPE, HeaderValue::from_static("text/plain;utf-8")); + Ok((headers, response)) + } else { + Err((StatusCode::NOT_FOUND, "Something wrong".to_owned())) + } +} diff --git a/orion-proxy/src/lib.rs b/orion-proxy/src/lib.rs index fae0bfb1..e074107f 100644 --- a/orion-proxy/src/lib.rs +++ b/orion-proxy/src/lib.rs @@ -20,6 +20,7 @@ use orion_configuration::{config::Config, options::Options}; use orion_lib::{Result, RUNTIME_CONFIG}; +use tracing::warn; #[macro_use] mod admin; @@ -43,7 +44,12 @@ pub fn run() -> Result<()> { tracing::warn!("CAP_NET_RAW is NOT available, SO_BINDTODEVICE will not work"); } - proxy::run_orion(bootstrap, access_logging) + if let Err(e) = proxy::run_orion(bootstrap, access_logging) { + warn!("Can't start orion {e:?}"); + Err(e) + } else { + Ok(()) + } } mod proxy_tracing { diff --git a/orion-proxy/src/proxy.rs b/orion-proxy/src/proxy.rs 
index 576dad4b..8046063e 100644 --- a/orion-proxy/src/proxy.rs +++ b/orion-proxy/src/proxy.rs @@ -49,7 +49,10 @@ use std::{ use tokio::{sync::mpsc::Sender, task::JoinSet}; use tracing::{debug, info, warn}; -pub fn run_orion(bootstrap: Bootstrap, access_log_config: Option) -> Result<()> { +pub fn run_orion( + bootstrap: Bootstrap, + access_log_config: Option, +) -> Result> { debug!("Starting on thread {:?}", std::thread::current().name()); // launch the runtimes... @@ -95,7 +98,10 @@ struct ServiceInfo { metrics: Vec, } -fn launch_runtimes(bootstrap: Bootstrap, access_log_config: Option) -> Result<()> { +fn launch_runtimes( + bootstrap: Bootstrap, + access_log_config: Option, +) -> Result> { let rt_config = runtime_config(); let num_runtimes = rt_config.num_runtimes(); let num_cpus = rt_config.num_cpus(); @@ -119,18 +125,8 @@ fn launch_runtimes(bootstrap: Bootstrap, access_log_config: Option>(); - // The xDS runtime always runs - this is necessary for initialization even if we do not - // use dynamic updates from remote xDS servers. The decision on whether dynamic updates - // are used is based on: - // - The bootstrap loader from orion-data-plane-api gets the list of cluster names used - // in dynamic_resources/ads_config (for grpc_services) - // - resolve ads clusters into endpoints, to be used as xDS address - // TODO: the xDS client could receive updates for endpoints too i.e. dynamic clusters. We - // should replace this with passing a configuration receiver. For now endpoints from - // static clusters. 
- let ads_cluster_names: Vec = bootstrap.get_ads_configs().iter().map(ToString::to_string).collect(); - let node = bootstrap.node.clone().unwrap_or_else(|| Node { id: "".into(), cluster_id: "".into() }); + let node = bootstrap.node.clone().unwrap_or_else(|| Node { id: "".into(), cluster_id: "".into(), metadata: None }); let (secret_manager, listener_factories, clusters) = get_listeners_and_clusters(bootstrap.clone()).with_context_msg("Failed to get listeners and clusters")?; @@ -142,7 +138,7 @@ fn launch_runtimes(bootstrap: Bootstrap, access_log_config: Option>; diff --git a/orion-proxy/src/xds_configurator.rs b/orion-proxy/src/xds_configurator.rs index 3ff4329b..01459c40 100644 --- a/orion-proxy/src/xds_configurator.rs +++ b/orion-proxy/src/xds_configurator.rs @@ -141,7 +141,7 @@ impl XdsConfigurationHandler { let mut cluster_names = ads_cluster_names.into_iter().cycle(); - let (mut worker, mut client, _subscription_manager) = loop { + let (mut worker, mut client, subscription_manager) = loop { let Some(cluster_name) = cluster_names.next() else { info!("No xDS clusters configured"); return Ok(()); @@ -167,7 +167,7 @@ impl XdsConfigurationHandler { info!("Got notification {xds_update:?}"); let XdsUpdateEvent { ack_channel, updates } = xds_update; // Box::pin because the future from self.process_updates() is very large - let rejected_updates = Box::pin(self.process_updates(updates)).await; + let rejected_updates = Box::pin(self.process_updates(updates, &subscription_manager)).await; let _ = ack_channel.send(rejected_updates); }, Some(health_update) = self.health_updates_receiver.recv() => Self::process_health_event(&health_update), @@ -180,17 +180,17 @@ impl XdsConfigurationHandler { Ok(()) } - async fn process_updates(&mut self, updates: Vec) -> Vec { + async fn process_updates(&mut self, updates: Vec, subscibtion_manager: &DeltaDiscoverySubscriptionManager) -> Vec { let mut rejected_updates = Vec::new(); for update in updates { match update { 
XdsResourceUpdate::Update(id, resource, _) => { - if let Err(e) = self.process_update_event(&id, resource).await { + if let Err(e) = self.process_update_event(&id, resource, subscibtion_manager).await { rejected_updates.push(RejectedConfig::from((id, e))); } }, XdsResourceUpdate::Remove(id, resource) => { - if let Err(e) = self.process_remove_event(&id, resource).await { + if let Err(e) = self.process_remove_event(&id, resource, subscibtion_manager).await { rejected_updates.push(RejectedConfig::from((id, e))); } }, @@ -199,9 +199,11 @@ impl XdsConfigurationHandler { rejected_updates } - async fn process_remove_event(&mut self, id: &str, resource: TypeUrl) -> Result<()> { + async fn process_remove_event(&mut self, id: &str, resource: TypeUrl, subscibtion_manager: &DeltaDiscoverySubscriptionManager) -> Result<()> { match resource { orion_xds::xds::model::TypeUrl::Cluster => { + let maybe_unsubscribed = subscibtion_manager.subscribe(id.to_owned(), TypeUrl::ClusterLoadAssignment).await; + debug!("Updating unsubscribed for {id} {} {maybe_unsubscribed:?} ", TypeUrl::ClusterLoadAssignment); orion_lib::clusters::remove_cluster(id)?; self.health_manager.stop_cluster(id).await; Ok(()) @@ -235,7 +237,7 @@ impl XdsConfigurationHandler { } #[allow(clippy::too_many_lines)] - async fn process_update_event(&mut self, _: &str, resource: XdsResourcePayload) -> Result<()> { + async fn process_update_event(&mut self, _: &str, resource: XdsResourcePayload, subscibtion_manager: &DeltaDiscoverySubscriptionManager) -> Result<()> { match resource { XdsResourcePayload::Listener(id, listener) => { debug!("Got update for listener {id} {:?}", listener); @@ -245,6 +247,23 @@ impl XdsConfigurationHandler { match factory { Ok(factory) => { let change = ListenerConfigurationChange::Added(Box::new((factory, listener.clone()))); + let subscriptions = listener.filter_chains.values().filter_map(|fc| + match &fc.terminal_filter{ + 
orion_configuration::config::listener::MainFilter::Http(http_connection_manager) => match &http_connection_manager.route_specifier{ + orion_configuration::config::network_filters::http_connection_manager::RouteSpecifier::Rds(rds_specifier) => Some(async { + let id= rds_specifier.route_config_name.to_string(); + let maybe_subscribed = subscibtion_manager.subscribe(id.clone(), TypeUrl::RouteConfiguration).await; + debug!("Updating subscription for {id} {} {maybe_subscribed:?} ", TypeUrl::RouteConfiguration); + }), + _ => None, + } + + orion_configuration::config::listener::MainFilter::Tcp(_) => None, + } + ).collect::>(); + + let _res = join_all(subscriptions).await; + let _ = send_change_to_runtimes(&self.listeners_senders, change).await; // update access logs configuration... self.access_log_listener_update(&id, &listener).await; @@ -264,7 +283,12 @@ impl XdsConfigurationHandler { debug!("Got update for cluster: {id}: {:#?}", cluster); let cluster_builder = PartialClusterType::try_from((cluster, &*self.secret_manager.read())); match cluster_builder { - Ok(cluster) => self.add_cluster(cluster).await, + Ok(cluster) => { + let maybe_subscribed = subscibtion_manager.subscribe(id.clone(), TypeUrl::ClusterLoadAssignment).await; + debug!("Updating subscription for {id} {} {maybe_subscribed:?} ", TypeUrl::ClusterLoadAssignment); + self.add_cluster(cluster).await + + }, Err(err) => { warn!("Got invalid update for cluster {id}"); Err(err) diff --git a/orion-xds/Cargo.toml b/orion-xds/Cargo.toml index 6669c1a8..8f3ea45a 100644 --- a/orion-xds/Cargo.toml +++ b/orion-xds/Cargo.toml @@ -1,36 +1,37 @@ [package] -edition.workspace = true +edition.workspace = true license-file.workspace = true -name = "orion-xds" +name = "orion-xds" rust-version.workspace = true -version.workspace = true +version.workspace = true [dependencies] -bytes.workspace = true -futures.workspace = true +bytes.workspace = true +futures.workspace = true orion-configuration.workspace = true 
orion-data-plane-api.workspace = true -orion-error.workspace = true +orion-error.workspace = true -http.workspace = true -serde.workspace = true -tokio.workspace = true -tower.workspace = true +http.workspace = true +serde.workspace = true +tokio.workspace = true +tower.workspace = true tracing.workspace = true async-stream = "0.3" -atomic-take = "1.1.0" +atomic-take = "1.1.0" -thiserror = "2.0.11" +thiserror = "2.0.11" tokio-stream.workspace = true -uuid = { version = "1.7.0", features = ["v4"] } +uuid = { version = "1.7.0", features = ["v4"] } [dev-dependencies] orion-data-plane-api.workspace = true serde_yaml.workspace = true -tokio.workspace = true -tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tokio.workspace = true +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +hyper-util.workspace = true [lints] workspace = true diff --git a/orion-xds/examples/client.rs b/orion-xds/examples/client.rs index 323139ca..6364ec6b 100644 --- a/orion-xds/examples/client.rs +++ b/orion-xds/examples/client.rs @@ -36,7 +36,7 @@ async fn main() -> Result<(), Box> { .init(); let (mut worker, mut client, _subscription_manager) = start_aggregate_client( - Node { id: "node1".into(), cluster_id: "cluster_id".into() }, + Node { id: "node1".into(), cluster_id: "cluster_id".into(), metadata: None }, "http://127.0.0.1:50051".parse()?, ) .await?; diff --git a/orion-xds/src/xds/client.rs b/orion-xds/src/xds/client.rs index 8b687d29..6333df51 100644 --- a/orion-xds/src/xds/client.rs +++ b/orion-xds/src/xds/client.rs @@ -269,7 +269,7 @@ impl DeltaClientBackgroundWorker { ) { match event { SubscriptionEvent::Subscribe(type_url, resource_id) => { - debug!(type_url = type_url.to_string(), resource_id, "processing new subscription"); + debug!("processing new subscription type_url={} {resource_id}", type_url.to_string()); let is_new = state.subscriptions.entry(type_url).or_default().insert(resource_id.clone()); if is_new { if let Err(err) = 
discovery_requests_tx @@ -286,7 +286,7 @@ impl DeltaClientBackgroundWorker { } }, SubscriptionEvent::Unsubscribe(type_url, resource_id) => { - debug!(type_url = type_url.to_string(), resource_id, "processing unsubscribe"); + debug!("processing unsubscribe type_url={} {resource_id}", type_url.to_string()); let was_subscribed = state.subscriptions.entry(type_url).or_default().remove(resource_id.as_str()); if was_subscribed { if let Err(err) = discovery_requests_tx @@ -353,8 +353,9 @@ impl DeltaClientBackgroundWorker { warn!(type_url = type_url.to_string(), error_msg, nonce, "rejecting configs with nack response"); Some(StatusBuilder::invalid_argument().with_message(error_msg).build()) }; - let upstream_response = DeltaDiscoveryRequestBuilder::for_resource(type_url) - .with_nounce(nonce.clone()) + + let upstream_response = DeltaDiscoveryRequestBuilder::for_resource(type_url) + .with_nonce(nonce.clone()) .with_error_detail(maybe_error) .build(); if let Err(err) = acknowledgments_tx.send(upstream_response).await { @@ -373,7 +374,7 @@ impl DeltaClientBackgroundWorker { .join("; "); let error_msg = format!("timed out trying to apply resource updates for [{version_info}]"); let upstream_response = DeltaDiscoveryRequestBuilder::for_resource(type_url) - .with_nounce(nonce.clone()) + .with_nonce(nonce.clone()) .with_error_detail(Some(StatusBuilder::unspecified_error().with_message(error_msg).build())) .build(); let _ = acknowledgments_tx.send(upstream_response).await; @@ -389,7 +390,7 @@ impl DeltaClientBackgroundWorker { error_msg, nonce, "decoding error, rejecting configs with nack response" ); let upstream_nack_response = DeltaDiscoveryRequestBuilder::for_resource(type_url) - .with_nounce(nonce) + .with_nonce(nonce) .with_error_detail(Some(StatusBuilder::invalid_argument().with_message(error_msg).build())) .build(); if let Err(err) = acknowledgments_tx.send(upstream_nack_response).await { diff --git a/orion-xds/src/xds/request.rs b/orion-xds/src/xds/request.rs index 
671e0053..50232d6e 100644 --- a/orion-xds/src/xds/request.rs +++ b/orion-xds/src/xds/request.rs @@ -82,7 +82,7 @@ impl DeltaDiscoveryRequestBuilder { self } - pub fn with_nounce(mut self, nounce: String) -> Self { + pub fn with_nonce(mut self, nounce: String) -> Self { self.nounce = Some(nounce); self } @@ -111,10 +111,10 @@ impl DeltaDiscoveryRequestBuilder { } pub fn build(self) -> DeltaDiscoveryRequest { - let Node { id, cluster_id } = self.node.unwrap_or_default(); + let Node { id, cluster_id, metadata } = self.node.unwrap_or_default(); let nounce = self.nounce.unwrap_or_default(); DeltaDiscoveryRequest { - node: Some(EnvoyNode { id: id.into(), cluster: cluster_id.into(), ..Default::default() }), + node: Some(EnvoyNode { id: id.into(), cluster: cluster_id.into(), metadata, ..Default::default() }), response_nonce: nounce, type_url: self.type_url.to_string(), resource_names_subscribe: self.resource_names_subscribe, diff --git a/orion-xds/tests/bootstrap.rs b/orion-xds/tests/bootstrap.rs new file mode 100644 index 00000000..be949dc6 --- /dev/null +++ b/orion-xds/tests/bootstrap.rs @@ -0,0 +1,329 @@ +// SPDX-FileCopyrightText: © 2025 kmesh authors +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 kmesh authors +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +use orion_data_plane_api::envoy_data_plane_api::envoy::config::{ + bootstrap::v3::Bootstrap, + core::v3::{address, socket_address::PortSpecifier, Address, SocketAddress}, + endpoint::v3::lb_endpoint::HostIdentifier, +}; +use orion_data_plane_api::{ + bootstrap_loader::bootstrap::{BootstrapLoader, BootstrapResolver, XdsConfig, XdsType}, + decode::from_yaml, + xds::model::TypeUrl, +}; +use std::{collections::HashSet, path::PathBuf}; + +#[test] +fn read_static_resource() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + assert_eq!(listener.name, "listener_0".to_owned()); + + let routes = loader.get_static_route_configs().unwrap(); + assert_eq!(routes.len(), 0); + + let mut clusters = loader.get_static_cluster_configs().unwrap(); + assert_eq!(clusters.len(), 3); + + let cluster = clusters.drain(..).next().unwrap(); + let orion_data_plane_api::envoy_data_plane_api::envoy::config::cluster::v3::Cluster { load_assignment, .. 
} = + cluster; + + let endpoints = load_assignment.unwrap().endpoints.drain(..).next().unwrap().lb_endpoints; + let endpoint = endpoints.first().unwrap(); + let Some(HostIdentifier::Endpoint(ref ept_any)) = endpoint.host_identifier else { + panic!("None valid endpoint"); + }; + + let Some(Address { address: ept_addr_any }) = ept_any.clone().address else { + panic!("No valid address from endpoint"); + }; + let Some(address::Address::SocketAddress(ept_socket_addr)) = ept_addr_any else { + panic!("No valid socket address from endpoint"); + }; + assert_eq!(ept_socket_addr.address, "127.0.0.1"); + let Some(PortSpecifier::PortValue(port)) = ept_socket_addr.port_specifier else { + panic!("No valid port value from endpoint"); + }; + assert_eq!(port, 5678); +} + +#[test] +fn read_dynamic_resource() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let xds_configs = loader.get_xds_configs().unwrap(); + assert_eq!(xds_configs.len(), 1); + assert_eq!( + xds_configs[0], + XdsConfig( + XdsType::Individual(TypeUrl::RouteConfiguration), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_owned(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5678)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); +} + +#[test] +fn read_ads_config() { + const ADS_BOOTSTRAP: &str = r#" +dynamic_resources: + ads_config: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: ads_cluster + lds_config: + ads: {} + cds_config: + ads: {} +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + clusters: + - name: ads_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + 
envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: ads_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5679 + "#; + let bootstrap: Bootstrap = from_yaml(ADS_BOOTSTRAP).unwrap(); + let loader = BootstrapLoader::from(bootstrap); + + let xds_configs = loader.get_xds_configs().unwrap(); + assert_eq!(xds_configs.len(), 1); + + assert_eq!( + xds_configs[0], + XdsConfig( + XdsType::Aggregated(HashSet::from([TypeUrl::Listener, TypeUrl::Cluster])), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_owned(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5679)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); +} + +#[test] +fn read_mixture_config() { + const BOOTSTRAP: &str = r#" +dynamic_resources: + ads_config: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: ads_cluster + lds_config: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: lds_cluster + cds_config: + resource_api_version: V3 + ads: {} + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + rds: + route_config_name: local_route + config_source: 
+ resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: rds_cluster + clusters: + - name: rds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: rds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5679 + - name: lds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: lds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5677 + - name: ads_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. 
+ connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: ads_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5678 + + "#; + let bootstrap: Bootstrap = from_yaml(BOOTSTRAP).unwrap(); + let loader = BootstrapLoader::from(bootstrap); + + let xds_configs = loader.get_xds_configs().unwrap(); + assert_eq!(xds_configs.len(), 3); + + assert_eq!( + xds_configs[0], + XdsConfig( + XdsType::Aggregated(HashSet::from([TypeUrl::Cluster])), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_owned(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5678)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); + + assert_eq!( + xds_configs[1], + XdsConfig( + XdsType::Individual(TypeUrl::Listener), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_owned(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5677)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); + + assert_eq!( + xds_configs[2], + XdsConfig( + XdsType::Individual(TypeUrl::RouteConfiguration), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_owned(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5679)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); +} diff --git a/orion-xds/tests/bootstrap_with_dynamic_resource.yml b/orion-xds/tests/bootstrap_with_dynamic_resource.yml new file mode 100644 index 00000000..53ddb1e1 --- /dev/null +++ b/orion-xds/tests/bootstrap_with_dynamic_resource.yml @@ -0,0 +1,110 @@ +dynamic_resources: + lds_config: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: lds_cluster + cds_config: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - 
envoy_grpc: + cluster_name: cds_cluster + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + rds: + route_config_name: local_route + config_source: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: rds_cluster + clusters: + - name: rds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: rds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5679 + - name: cds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. 
+ connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: cds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5678 + - name: lds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: lds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5677 diff --git a/orion-xds/tests/bootstrap_with_http_connection_manager.yml b/orion-xds/tests/bootstrap_with_http_connection_manager.yml new file mode 100644 index 00000000..6b6df551 --- /dev/null +++ b/orion-xds/tests/bootstrap_with_http_connection_manager.yml @@ -0,0 +1,105 @@ +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 1234 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + rds: + route_config_name: local_route + config_source: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: rds_cluster + clusters: + - name: rds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: rds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5678 + - name: cluster_1 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1111 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 2222 + + - name: cluster_2 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. 
+ connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: cluster_2 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3333 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4444 \ No newline at end of file diff --git a/orion-xds/tests/bootstrap_with_http_filters.yml b/orion-xds/tests/bootstrap_with_http_filters.yml new file mode 100644 index 00000000..ba1d8486 --- /dev/null +++ b/orion-xds/tests/bootstrap_with_http_filters.yml @@ -0,0 +1,76 @@ +admin: + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + - filters: + - name: sending-to-server + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: http_traffic + route_config: + name: local_route + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/books/" + route: + prefix_rewrite: "/" + cluster: some_service + - match: + prefix: "/" + direct_response: + status: 403 + body: + inline_string: "Page does not exist!" 
+ http_filters: + - name: sample customized filter + typed_config: + "@type": type.googleapis.com/sample.SampleFilter + key: DATA + val: "123" + - name: bandwidth_limit_filter + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + stat_prefix: bandwidth_limit + enable_mode: REQUEST_AND_RESPONSE + limit_kbps: 1000 + - name: health_check_filter + ## https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/health_checking + typed_config: + "@type":"type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck" + pass_through_mode: true + headers: + - name: ":path" + exact_match: "/healthz" + cache_time: 30000 + + + + clusters: + - name: some_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1200 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1201 + diff --git a/orion-xds/tests/bootstrap_with_tcp_proxy.yml b/orion-xds/tests/bootstrap_with_tcp_proxy.yml new file mode 100644 index 00000000..bcf41030 --- /dev/null +++ b/orion-xds/tests/bootstrap_with_tcp_proxy.yml @@ -0,0 +1,35 @@ +admin: + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + stat_prefix: destination + cluster: some_service + clusters: + - name: some_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1200 + - endpoint: + address: + 
socket_address: + address: 127.0.0.1 + port_value: 1201 \ No newline at end of file diff --git a/orion-xds/tests/bootstrap_with_tls_server.yml b/orion-xds/tests/bootstrap_with_tls_server.yml new file mode 100644 index 00000000..b3b6d988 --- /dev/null +++ b/orion-xds/tests/bootstrap_with_tls_server.yml @@ -0,0 +1,20 @@ + +static_resources: + listeners: + - name: listener_https + address: + socket_address: { address: 127.0.0.1, port_value: 8443 } + filterChains: + - transportSocket: + name: envoy.transport_sockets.tls + typedConfig: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + commonTlsContext: + tlsParams: + tlsMinimumProtocolVersion: TLSv1_3 + tlsMaximumProtocolVersion: TLSv1_3 + tlsCertificates: + - certificateChain: + filename: ./tests/server.crt + privateKey: + filename: ./tests/server.key diff --git a/orion-xds/tests/bootstrap_with_weighted_cluster.yml b/orion-xds/tests/bootstrap_with_weighted_cluster.yml new file mode 100644 index 00000000..9bca0baa --- /dev/null +++ b/orion-xds/tests/bootstrap_with_weighted_cluster.yml @@ -0,0 +1,59 @@ +admin: + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + stat_prefix: destination + weighted_clusters: + - cluster_weight: + name: some_service_0 + weight: 25 + - cluster_weight: + name: some_service_1 + weight: 75 + clusters: + - name: some_service_0 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1200 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1201 + - 
name: some_service_1 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1200 + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1201 \ No newline at end of file diff --git a/orion-xds/tests/envoy_validation.rs b/orion-xds/tests/envoy_validation.rs new file mode 100644 index 00000000..dd7a42c3 --- /dev/null +++ b/orion-xds/tests/envoy_validation.rs @@ -0,0 +1,160 @@ +// SPDX-FileCopyrightText: © 2025 kmesh authors +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 kmesh authors +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +use orion_data_plane_api::{ + bootstrap_loader::bootstrap::{BootstrapLoader, BootstrapResolver}, + decode::from_yaml, + envoy_data_plane_api::{ + envoy::extensions::filters::network::http_connection_manager::v3::http_connection_manager::CodecType, + google::protobuf::Duration, + }, + envoy_validation::{ClusterValidation, FilterChainValidation, FilterValidation, LocalRateLimitValidation}, +}; +use std::path::PathBuf; + +#[test] +fn yaml_get_downstream_tls_context() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_tls_server.yml"); + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + + let fc = &listener.filter_chains; + assert_eq!(fc.len(), 1); + + let ctx = fc[0].get_downstream_tls_context().unwrap().expect("DownstreamTlsContext is missing"); + assert_eq!(ctx.common_tls_context.unwrap().tls_params.unwrap().tls_minimum_protocol_version, 4); + //tls1.3 +} + +#[test] +fn yaml_get_downstream_tls_context_is_none() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + + let fc = &listener.filter_chains; + assert_eq!(fc.len(), 1); + assert!(fc[0].get_downstream_tls_context().unwrap().is_none()); +} + +#[test] +fn yaml_get_http_connection_manager() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + + 
let fc = &listener.filter_chains; + assert_eq!(fc.len(), 1); + + let _httpman = fc[0].filters[0].get_http_connection_manager().unwrap().expect("HttpConnectionManager is missing"); +} + +#[test] +fn filter_codec_type() { + const INP_FILTER: &str = r#" +name: http_gateway +typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1"#; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::listener::v3::Filter; + let filter: Filter = from_yaml(INP_FILTER).unwrap(); + let httpman = filter.get_http_connection_manager().unwrap().unwrap(); + assert_eq!(CodecType::try_from(httpman.codec_type).unwrap().as_str_name(), "HTTP1"); +} + +#[test] +fn cluster_http_proto_options_ext() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_dynamic_resource.yml"); + + const INP_CLUSTER: &str = r#" +name: xds_cluster +connect_timeout: 0.25s +type: STATIC +lb_policy: ROUND_ROBIN +typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + connection_keepalive: + interval: 30s + timeout: 5s +"#; + + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::cluster::v3::Cluster, + extensions::upstreams::http::v3::http_protocol_options::{ + explicit_http_config::ProtocolConfig, UpstreamProtocolOptions, + }, + }; + + let cluster: Cluster = from_yaml(INP_CLUSTER).unwrap(); + let proto_opts = cluster.get_http_protocol_options().unwrap().unwrap(); + + let upstream_opts = proto_opts.upstream_protocol_options.unwrap(); + if let UpstreamProtocolOptions::ExplicitHttpConfig(cfg) = upstream_opts { + if let ProtocolConfig::Http2ProtocolOptions(ref h2_opts) = cfg.protocol_config.as_ref().unwrap() { + let ka = 
h2_opts.connection_keepalive.as_ref().unwrap(); + assert_eq!(ka.interval.as_ref().unwrap(), &Duration { seconds: 30, nanos: 0 }); + assert_eq!(ka.timeout.as_ref().unwrap(), &Duration { seconds: 5, nanos: 0 }); + } else { + panic!("Expecting http2 options, got {cfg:?}"); + } + } else { + panic!("Expecting ExplicitHttpConfig, got {upstream_opts:?}"); + } +} + +#[test] +fn yaml_get_local_ratelimit() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_local_ratelimit.yml"); + + const INP_LOCAL_RATELIMIT: &str = r#" + match: {prefix: "/path/with/rate/limit"} + route: {cluster: service_protected_by_rate_limit} + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_ratelimit + token_bucket: + max_tokens: "10000" + tokens_per_fill: "1000" + fill_interval: "5s" +"#; + + use orion_data_plane_api::envoy_data_plane_api::envoy::config::route::v3::Route; + + let route: Route = from_yaml(INP_LOCAL_RATELIMIT).unwrap(); + let local_ratelimit = route.get_local_ratelimit().unwrap().unwrap(); + assert_eq!(local_ratelimit.token_bucket.unwrap().max_tokens, 10000); +} diff --git a/orion-xds/tests/xds.rs b/orion-xds/tests/xds.rs new file mode 100644 index 00000000..0176bd95 --- /dev/null +++ b/orion-xds/tests/xds.rs @@ -0,0 +1,472 @@ +// SPDX-FileCopyrightText: © 2025 kmesh authors +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 kmesh authors +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +use hyper_util::rt::tokio::TokioIo; +use orion_configuration::config::bootstrap::Node; +use orion_data_plane_api::envoy_data_plane_api::prost::Message; +use orion_data_plane_api::envoy_data_plane_api::{ + envoy::{ + config::cluster::v3::Cluster, + service::{ + cluster::v3::{ + cluster_discovery_service_client::ClusterDiscoveryServiceClient, + cluster_discovery_service_server::{ClusterDiscoveryService, ClusterDiscoveryServiceServer}, + }, + discovery::v3::{ + aggregated_discovery_service_client::AggregatedDiscoveryServiceClient, + aggregated_discovery_service_server::{AggregatedDiscoveryService, AggregatedDiscoveryServiceServer}, + DeltaDiscoveryResponse, DiscoveryResponse, Resource, + }, + }, + }, + google::protobuf::Any, + tonic, +}; +use std::{ + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; +use tonic::transport::Server; + +use orion_xds::xds::{ + bindings, + client::DiscoveryClientBuilder, + model::{TypeUrl, XdsResourceUpdate}, +}; + +use futures::Stream; + +use tokio::{ + sync::{mpsc, Mutex}, + time::{self, sleep}, +}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{transport::Uri, Response, Status}; +use tower::service_fn; +pub struct MockDiscoveryService { + relay: Arc>>>, +} + +#[tonic::async_trait] +impl AggregatedDiscoveryService for MockDiscoveryService { + type StreamAggregatedResourcesStream = Pin> + Send>>; + async fn stream_aggregated_resources( + &self, + _request: tonic::Request< + tonic::Streaming< + 
orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, + >, + >, + ) -> std::result::Result, tonic::Status> { + unimplemented!("not used by proxy"); + } + + type DeltaAggregatedResourcesStream = Pin> + Send>>; + async fn delta_aggregated_resources( + &self, + request: tonic::Request< + tonic::Streaming< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, + >, + >, + ) -> std::result::Result, tonic::Status> { + let mut in_stream = request.into_inner(); + let (tx, rx) = mpsc::channel::>(100); + let shared_receiver = self.relay.clone(); + tokio::spawn(async move { + let mut receiver = shared_receiver.lock().await; + 'outer: while let Ok(result) = in_stream.message().await { + match result { + Some(_) => { + while let Some(wrapped_response) = receiver.recv().await { + match tx.send(wrapped_response.clone()).await { + Ok(_) => { + if wrapped_response.is_err() { + break 'outer; + } + }, + _ => { + break 'outer; + }, + } + } + }, + _ => { + break; + }, + } + } + }); + let output_stream = ReceiverStream::new(rx); + Ok(Response::new(Box::pin(output_stream) as Self::DeltaAggregatedResourcesStream)) + } +} + +#[tonic::async_trait] +impl ClusterDiscoveryService for MockDiscoveryService { + type StreamClustersStream = Pin> + Send>>; + async fn stream_clusters( + &self, + _request: tonic::Request< + tonic::Streaming< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, + >, + >, + ) -> std::result::Result, tonic::Status> { + unimplemented!("not used by proxy"); + } + + type DeltaClustersStream = Pin> + Send>>; + async fn delta_clusters( + &self, + request: tonic::Request< + tonic::Streaming< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, + >, + >, + ) -> std::result::Result, tonic::Status> { + let mut in_stream = request.into_inner(); + let (tx, rx) = mpsc::channel::>(100); + let shared_receiver = 
self.relay.clone(); + tokio::spawn(async move { + let mut receiver = shared_receiver.lock().await; + 'outer: while let Ok(result) = in_stream.message().await { + match result { + Some(_) => { + while let Some(wrapped_response) = receiver.recv().await { + match tx.send(wrapped_response.clone()).await { + Ok(_) => { + if wrapped_response.is_err() { + break 'outer; + } + }, + _ => { + break 'outer; + }, + } + } + }, + _ => { + break; + }, + } + } + }); + let output_stream = ReceiverStream::new(rx); + Ok(Response::new(Box::pin(output_stream) as Self::DeltaClustersStream)) + } + + async fn fetch_clusters( + &self, + _request: tonic::Request< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + unimplemented!("not used by proxy"); + } +} + +#[tokio::test] +async fn test_client_operations() { + let node = Node { id: "node-id".into(), cluster_id: "gw-cluster".into(), ..Default::default() }; + let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; + let cluster_resource = Resource { + name: cluster.name.clone(), + version: "0.1".to_owned(), + resource: Some(Any { + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), + value: cluster.encode_to_vec(), + }), + ..Default::default() + }; + let resources = vec![cluster_resource]; + + let initial_response: Result = Ok(DeltaDiscoveryResponse { + resources, + nonce: "abcd".to_owned(), + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), + ..Default::default() + }); + + let (server_side_response_tx, server_side_response_rx) = + mpsc::channel::>(100); + + let (client, server) = tokio::io::duplex(1024); + let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; + tokio::spawn(async move { + Server::builder() + .add_service(ClusterDiscoveryServiceServer::new(cds_server)) + 
.serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) + .await + }); + + let mut client = Some(client); + let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") + .expect("failed to init Endpoint") + .connect_with_connector(service_fn(move |_: Uri| { + let client = client.take(); + async move { + if let Some(client) = client { + Ok(TokioIo::new(client)) + } else { + Err(std::io::Error::other("client is already taken")) + } + } + })) + .await; + + let cds_client = ClusterDiscoveryServiceClient::new(channel.unwrap()); + let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; + + let (mut worker, mut client, subscription_manager) = + DiscoveryClientBuilder::::new(node, typed_binding).build().unwrap(); + + tokio::spawn(async move { + let _status = worker.run().await; + }); + + let _status = server_side_response_tx.send(initial_response).await; + + let _ = subscription_manager.subscribe(String::new(), TypeUrl::Cluster).await; + + tokio::select! 
{ + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload, _version)) => { + assert_eq!(name, "cluster-a"); + let ack_result = captured_response.ack_channel.send(vec![]); + assert!(ack_result.is_ok(), "failed to acknowledge response"); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(5)) => + panic!("timed out waiting for xds resource over update channel") + } +} + +#[tokio::test] +async fn test_client_resilience() { + let node = Node { id: "node-id".into(), cluster_id: "gw-cluster".into(), ..Default::default() }; + let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; + let cluster_resource = Resource { + name: cluster.name.clone(), + version: "0.1".to_owned(), + resource: Some(Any { + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), + value: cluster.encode_to_vec(), + }), + ..Default::default() + }; + let resources = vec![cluster_resource]; + + let initial_response: Result = Ok(DeltaDiscoveryResponse { + resources, + nonce: "abcd".to_owned(), + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), + ..Default::default() + }); + + let (server_side_response_tx, server_side_response_rx) = + mpsc::channel::>(100); + + let (client, server) = tokio::io::duplex(1024); + let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; + + tokio::spawn(async move { + Server::builder() + .add_service(ClusterDiscoveryServiceServer::new(cds_server)) + .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) + .await + }); + + let mut client = Some(client); + let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") + .expect("failed to init Endpoint") + .connect_with_connector_lazy(service_fn(move |_: Uri| { + let client = client.take(); + async move { + if let Some(client) = client { + 
Ok(TokioIo::new(client)) + } else { + Err(std::io::Error::other("client is already taken")) + } + } + })); + + let cds_client = ClusterDiscoveryServiceClient::new(channel); + let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; + + let (mut worker, mut client, subscription_manager) = + DiscoveryClientBuilder::::new(node, typed_binding) + .subscribe_resource_name("cluster-a".to_owned()) + .subscribe_resource_name("cluster-b".to_owned()) + .build() + .unwrap(); + + let _ = subscription_manager.subscribe(String::new(), TypeUrl::Cluster).await; + tokio::spawn(async move { + let status = worker.run().await; + }); + let captured_count = AtomicUsize::new(0); + + let _status = server_side_response_tx.send(initial_response.clone()).await; + + tokio::select! { + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload, _version)) => { + assert_eq!(name, "cluster-a"); + let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); + assert_eq!( + captured_count.load(Ordering::Relaxed), + 1, + "cluster-a should be captured just once after some time" + ); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(3)) => + panic!("timed out waiting for xds resource over update channel") + } + + let abort_response: Result = + Err(tonic::Status::aborted("kill the stream for testing purposes")); + let _status = server_side_response_tx.send(abort_response).await; + sleep(Duration::from_millis(300)).await; + + let _status = server_side_response_tx.send(initial_response.clone()).await; + sleep(Duration::from_millis(300)).await; + + tokio::select! 
{ + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload, _version)) => { + assert_eq!(name, "cluster-a"); + let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); + assert_eq!( + captured_count.load(Ordering::Relaxed), + 2, + "cluster-a should be captured again after reconnect" + ); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(3)) => + panic!("timed out waiting for xds resource over update channel") + } +} + +#[tokio::test] +async fn test_aggregated_discovery() { + let node = Node { id: "node-id".into(), cluster_id: "gw-cluster".into(), ..Default::default() }; + let cluster = Cluster { name: "cluster-a".to_owned(), ..Default::default() }; + let cluster_resource = Resource { + name: cluster.name.clone(), + version: "0.1".to_owned(), + resource: Some(Any { + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), + value: cluster.encode_to_vec(), + }), + ..Default::default() + }; + let resources = vec![cluster_resource]; + + let initial_response: Result = Ok(DeltaDiscoveryResponse { + resources, + nonce: "abcd".to_owned(), + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(), + ..Default::default() + }); + + let (server_side_response_tx, server_side_response_rx) = + mpsc::channel::>(100); + + let (client, server) = tokio::io::duplex(1024); + let ads_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; + tokio::spawn(async move { + Server::builder() + .add_service(AggregatedDiscoveryServiceServer::new(ads_server)) + .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) + .await + }); + + let mut client = Some(client); + let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") + .expect("failed to init Endpoint") + .connect_with_connector(service_fn(move |_: Uri| { + let client = client.take(); + async move 
{ + if let Some(client) = client { + Ok(TokioIo::new(client)) + } else { + Err(std::io::Error::other("client is already taken")) + } + } + })) + .await + .unwrap(); + + let ads_client = AggregatedDiscoveryServiceClient::new(channel.clone()); + let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; + + let client = DiscoveryClientBuilder::new(node.clone(), typed_binding) + .subscribe_resource_name("my-cluster".to_owned()) + .build(); + assert!(client.is_err(), "cannot subscribe to resources without a type_url for ADS"); + + let ads_client = AggregatedDiscoveryServiceClient::new(channel); + let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; + + let (mut worker, mut client, subscription_manager) = + DiscoveryClientBuilder::new(node, typed_binding).build().unwrap(); + + let _ = subscription_manager.subscribe("cluster-a".to_owned(), TypeUrl::Cluster).await; + let _ = subscription_manager.subscribe("cluster-z".to_owned(), TypeUrl::Cluster).await; + let _ = subscription_manager.subscribe("endpoints-a".to_owned(), TypeUrl::ClusterLoadAssignment).await; + let _ = subscription_manager.subscribe("secret-config-a".to_owned(), TypeUrl::Secret).await; + + tokio::spawn(async move { + let _status = worker.run().await; + }); + + let _status = server_side_response_tx.send(initial_response).await; + + tokio::select! 
{ + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload, _version)) => { + assert_eq!(name, "cluster-a"); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(5)) => + panic!("timed out waiting for xds resource over update channel") + } +} diff --git a/tools/hyper_unix_listener/Cargo.toml b/tools/hyper_unix_listener/Cargo.toml new file mode 100644 index 00000000..ebeb78d4 --- /dev/null +++ b/tools/hyper_unix_listener/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "hyper_unix_listener" +description.workspace = true +edition.workspace = true +license-file.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +tokio.workspace = true +hyper = { version = "1", features = ["full"] } +clap = { version = "4.5.46", features = ["derive"] } + +hyperlocal = "0.9.1" + +[lints] +workspace = true diff --git a/tools/hyper_unix_listener/src/main.rs b/tools/hyper_unix_listener/src/main.rs new file mode 100644 index 00000000..e1813f1c --- /dev/null +++ b/tools/hyper_unix_listener/src/main.rs @@ -0,0 +1,27 @@ +use std::path::PathBuf; + +use clap::Parser; +use hyper::Response; +use hyperlocal::UnixListenerExt; +use tokio::net::UnixListener; + +#[derive(Parser, Debug, Clone)] +pub struct Options { + #[arg(long)] + path_name: PathBuf, +} +#[tokio::main] +#[allow(clippy::expect_used)] +async fn main() { + let args: Options = Options::parse(); + + let future = async move { + let listener = UnixListener::bind(args.path_name).expect("parsed unix path"); + + listener + .serve(|| |_request| async { Ok::<_, hyper::Error>(Response::new("Hello, world.".to_owned())) }) + .await + .expect("failed to serve a connection") + }; + future.await +}