diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..d6a8c164
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+/target
+.idea/
+*.profraw
+*~
+*.html
+*.log.*
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..47ff0eb1
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,24 @@
+[submodule "envoy-data-plane-api/data-plane-api"]
+	path = envoy-data-plane-api/data-plane-api
+	url = https://github.com/envoyproxy/data-plane-api.git
+[submodule "envoy-data-plane-api/xds"]
+	path = envoy-data-plane-api/xds
+	url = https://github.com/cncf/xds.git
+[submodule "envoy-data-plane-api/protoc-gen-validate"]
+	path = envoy-data-plane-api/protoc-gen-validate
+	url = https://github.com/bufbuild/protoc-gen-validate.git
+[submodule "envoy-data-plane-api/googleapis"]
+	path = envoy-data-plane-api/googleapis
+	url = https://github.com/googleapis/googleapis.git
+[submodule "envoy-data-plane-api/opencensus-proto"]
+	path = envoy-data-plane-api/opencensus-proto
+	url = https://github.com/census-instrumentation/opencensus-proto.git
+[submodule "envoy-data-plane-api/opentelemetry-proto"]
+	path = envoy-data-plane-api/opentelemetry-proto
+	url = https://github.com/open-telemetry/opentelemetry-proto.git
+[submodule "envoy-data-plane-api/prometheus-client-model"]
+	path = envoy-data-plane-api/prometheus-client-model
+	url = https://github.com/prometheus/client_model.git
+[submodule "envoy-data-plane-api/cel-spec"]
+	path = envoy-data-plane-api/cel-spec
+	url = https://github.com/google/cel-spec.git
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 00000000..4e3d6416
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,17 @@
+### Current
+|Name| Affiliation| Contact |
+|--------|------|---|
+|Alan Keane| Huawei Ireland Research Lab|alan.keane1@huawei.com|
+|Dawid Nowak| Huawei Ireland Research Lab|dawid.nowak@huawei.com|
+|Francesco Ciaccia| Huawei Ireland Research Lab|francesco.ciaccia1@huawei-partners.com|
+|Nicola Bonelli | Huawei Ireland Research Lab|nicola.bonelli@huawei-partners.com|
+|Wang Ruize | Huawei| wangruize1@huawei.com|
+
+
+### Past
+|Name| Affiliation|
+|--------|------|
+|Liu Xiang | Huawei|
+|Rui Ferreira | Huawei Ireland Research Lab|
+|Oriol Arcas | Huawei Ireland Research Lab|
+|Hayley Deckers | Huawei Ireland Research Lab|
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 00000000..58dbc964
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,3162 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "abort-on-drop"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dd6d700ad9af641490c1f7d67980d2de4d1433016e5b12f819448d3c832142a"
+dependencies = [
+ "tokio",
+]
+
+[[package]]
+name = "addr2line"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+
+[[package]]
+name = "affinity"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "763e484feceb7dd021b21c5c6f81aee06b1594a743455ec7efbf72e6355e447b"
+dependencies = [
+ "cfg-if",
+ "errno",
+ "libc",
+ "num_cpus",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anstream"
+version = "0.6.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a"
+dependencies = [
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8"
+dependencies = [
+ "anstyle",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.89"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
+
+[[package]]
+name = "asn1-rs"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048"
+dependencies = [
+ "asn1-rs-derive",
+ "asn1-rs-impl",
+ "displaydoc",
+ "nom",
+ "num-traits",
+ "rusticata-macros",
+ "thiserror 1.0.64",
+ "time",
+]
+
+[[package]]
+name = "asn1-rs-derive"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "synstructure",
+]
+
+[[package]]
+name = "asn1-rs-impl"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "async-stream"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "async-trait"
+version = "0.1.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "atomic-take"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8ab6b55fe97976e46f91ddbed8d147d966475dc29b2032757ba47e02376fbc3"
+
+[[package]]
+name = "atomic-time"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9622f5c6fb50377516c70f65159e70b25465409760c6bd6d4e581318bf704e83"
+dependencies = [
+ "once_cell",
+ "portable-atomic",
+]
+
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
+[[package]]
+name = "autocfg"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
+
+[[package]]
+name = "aws-lc-rs"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d"
+dependencies = [
+ "aws-lc-sys",
+ "mirai-annotations",
+ "paste",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-lc-sys"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972"
+dependencies = [
+ "bindgen",
+ "cc",
+ "cmake",
+ "dunce",
+ "fs_extra",
+ "libc",
+ "paste",
+]
+
+[[package]]
+name = "axum"
+version = "0.7.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae"
+dependencies = [
+ "async-trait",
+ "axum-core",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "http-body-util",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "sync_wrapper 1.0.1",
+ "tower 0.5.1",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "http-body-util",
+ "mime",
+ "pin-project-lite",
+ "rustversion",
+ "sync_wrapper 1.0.1",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "backtrace"
+version = "0.3.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
+dependencies = [
+ "addr2line",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "base64"
+version = "0.21.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
+
+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
+[[package]]
+name = "base64-serde"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba368df5de76a5bea49aaf0cf1b39ccfbbef176924d1ba5db3e4135216cbe3c7"
+dependencies = [
+ "base64 0.21.7",
+ "serde",
+]
+
+[[package]]
+name = "bindgen"
+version = "0.69.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0"
+dependencies = [
+ "bitflags",
+ "cexpr",
+ "clang-sys",
+ "itertools 0.12.1",
+ "lazy_static",
+ "lazycell",
+ "log",
+ "prettyplease",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "rustc-hash 1.1.0",
+ "shlex",
+ "syn",
+ "which",
+]
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "bytes"
+version = "1.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
+
+[[package]]
+name = "caps"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "190baaad529bcfbde9e1a19022c42781bdb6ff9de25721abdb8fd98c0807730b"
+dependencies = [
+ "libc",
+ "thiserror 1.0.64",
+]
+
+[[package]]
+name = "castaway"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5"
+dependencies = [
+ "rustversion",
+]
+
+[[package]]
+name = "cc"
+version = "1.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
+dependencies = [
+ "jobserver",
+ "libc",
+ "shlex",
+]
+
+[[package]]
+name = "cesu8"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
+
+[[package]]
+name = "cexpr"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
+dependencies = [
+ "nom",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "clang-sys"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
+dependencies = [
+ "glob",
+ "libc",
+ "libloading",
+]
+
+[[package]]
+name = "clap"
+version = "4.5.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.5.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
+
+[[package]]
+name = "cmake"
+version = "0.1.51"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "colorchoice"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0"
+
+[[package]]
+name = "combine"
+version = "4.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
+dependencies = [
+ "bytes",
+ "memchr",
+]
+
+[[package]]
+name = "compact_str"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6050c3a16ddab2e412160b31f2c871015704239bca62f72f6e5f0be631d3f644"
+dependencies = [
+ "castaway",
+ "cfg-if",
+ "itoa",
+ "rustversion",
+ "ryu",
+ "serde",
+ "static_assertions",
+]
+
+[[package]]
+name = "console-api"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86ed14aa9c9f927213c6e4f3ef75faaad3406134efe84ba2cb7983431d5f0931"
+dependencies = [
+ "futures-core",
+ "prost",
+ "prost-types",
+ "tonic",
+ "tracing-core",
+]
+
+[[package]]
+name = "console-subscriber"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e3a111a37f3333946ebf9da370ba5c5577b18eb342ec683eb488dd21980302"
+dependencies = [
+ "console-api",
+ "crossbeam-channel",
+ "crossbeam-utils",
+ "futures-task",
+ "hdrhistogram",
+ "humantime",
+ "hyper-util",
+ "prost",
+ "prost-types",
+ "serde",
+ "serde_json",
+ "thread_local",
+ "tokio",
+ "tokio-stream",
+ "tonic",
+ "tracing",
+ "tracing-core",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
+
+[[package]]
+name = "crc32fast"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
+
+[[package]]
+name = "data-encoding"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
+
+[[package]]
+name = "der-parser"
+version = "9.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553"
+dependencies = [
+ "asn1-rs",
+ "displaydoc",
+ "nom",
+ "num-bigint",
+ "num-traits",
+ "rusticata-macros",
+]
+
+[[package]]
+name = "deranged"
+version = "0.3.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
+dependencies = [
+ "powerfmt",
+]
+
+[[package]]
+name = "dhat"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98cd11d84628e233de0ce467de10b8633f4ddaecafadefc86e13b84b8739b827"
+dependencies = [
+ "backtrace",
+ "lazy_static",
+ "mintex",
+ "parking_lot",
+ "rustc-hash 1.1.0",
+ "serde",
+ "serde_json",
+ "thousands",
+]
+
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "dunce"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
+
+[[package]]
+name = "either"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+
+[[package]]
+name = "enum-as-inner"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "enum_dispatch"
+version = "0.3.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd"
+dependencies = [
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "envoy-data-plane-api"
+version = "0.1.9"
+dependencies = [
+ "glob",
+ "prost",
+ "prost-build",
+ "prost-reflect",
+ "serde",
+ "serde_json",
+ "serde_yaml",
+ "tonic",
+ "tonic-build",
+ "tonic-health",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
+
+[[package]]
+name = "errno"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "exponential-backoff"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "949eb68d436415e37b7a69c49a9900d5337616b0e420377ccc48038b86261e16"
+dependencies = [
+ "fastrand",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
+
+[[package]]
+name = "fixedbitset"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
+
+[[package]]
+name = "flate2"
+version = "1.0.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "form_urlencoded"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "fs_extra"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
+
+[[package]]
+name = "futures"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
+
+[[package]]
+name = "futures-task"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
+
+[[package]]
+name = "futures-util"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "gimli"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
+
+[[package]]
+name = "glob"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+
+[[package]]
+name = "h2"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http",
+ "indexmap 2.6.0",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
+
+[[package]]
+name = "hdrhistogram"
+version = "7.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
+dependencies = [
+ "base64 0.21.7",
+ "byteorder",
+ "flate2",
+ "nom",
+ "num-traits",
+]
+
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
+
+[[package]]
+name = "hickory-proto"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512"
+dependencies = [
+ "async-trait",
+ "cfg-if",
+ "data-encoding",
+ "enum-as-inner",
+ "futures-channel",
+ "futures-io",
+ "futures-util",
+ "idna 0.4.0",
+ "ipnet",
+ "once_cell",
+ "rand",
+ "thiserror 1.0.64",
+ "tinyvec",
+ "tokio",
+ "tracing",
+ "url",
+]
+
+[[package]]
+name = "hickory-resolver"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243"
+dependencies = [
+ "cfg-if",
+ "futures-util",
+ "hickory-proto",
+ "ipconfig",
+ "lru-cache",
+ "once_cell",
+ "parking_lot",
+ "rand",
+ "resolv-conf",
+ "smallvec",
+ "thiserror 1.0.64",
+ "tokio",
+ "tracing",
+]
+
+[[package]]
+name = "home"
+version = "0.5.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
+dependencies = [
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "hostname"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
+dependencies = [
+ "libc",
+ "match_cfg",
+ "winapi",
+]
+
+[[package]]
+name = "http"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "http-serde-ext"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "665c24b8e7e21688dc74edb228f07c1815bbc7ff3b48a3ee72fa20937fbde095"
+dependencies = [
+ "http",
+ "serde",
+]
+
+[[package]]
+name = "httparse"
+version = "1.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "humantime-serde"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c"
+dependencies = [
+ "humantime",
+ "serde",
+]
+
+[[package]]
+name = "hyper"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "h2",
+ "http",
+ "http-body",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.27.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333"
+dependencies = [
+ "futures-util",
+ "http",
+ "hyper",
+ "hyper-util",
+ "log",
+ "rustls",
+ "rustls-native-certs 0.8.0",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-timeout"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793"
+dependencies = [
+ "hyper",
+ "hyper-util",
+ "pin-project-lite",
+ "tokio",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "pin-project-lite",
+ "socket2",
+ "tokio",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "idna"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+]
+
+[[package]]
+name = "idna"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.15.0",
+]
+
+[[package]]
+name = "ipconfig"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
+dependencies = [
+ "socket2",
+ "widestring",
+ "windows-sys 0.48.0",
+ "winreg",
+]
+
+[[package]]
+name = "ipnet"
+version = "2.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
+[[package]]
+name = "itertools"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itertools"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+[[package]]
+name = "jni"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec"
+dependencies = [
+ "cesu8",
+ "combine",
+ "jni-sys",
+ "log",
+ "thiserror 1.0.64",
+ "walkdir",
+]
+
+[[package]]
+name = "jni-sys"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
+
+[[package]]
+name = "jobserver"
+version = "0.1.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+
+[[package]]
+name = "lazycell"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
+
+[[package]]
+name = "libc"
+version = "0.2.159"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
+
+[[package]]
+name = "libloading"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
+dependencies = [
+ "cfg-if",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "linked-hash-map"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+
+[[package]]
+name = "lock_api"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
+[[package]]
+name = "lru-cache"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c"
+dependencies = [
+ "linked-hash-map",
+]
+
+[[package]]
+name = "match_cfg"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
+
+[[package]]
+name = "matchers"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+dependencies = [
+ "regex-automata 0.1.10",
+]
+
+[[package]]
+name = "matchit"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "mime"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+
+[[package]]
+name = "minimal-lexical"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
+dependencies = [
+ "adler2",
+]
+
+[[package]]
+name = "mintex"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bec4598fddb13cc7b528819e697852653252b760f1228b7642679bf2ff2cd07"
+
+[[package]]
+name = "mio"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "wasi",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "mirai-annotations"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1"
+
+[[package]]
+name = "multimap"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
+
+[[package]]
+name = "nom"
+version = "7.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
+dependencies = [
+ "memchr",
+ "minimal-lexical",
+]
+
+[[package]]
+name = "nu-ansi-term"
+version = "0.46.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
+dependencies = [
+ "overload",
+ "winapi",
+]
+
+[[package]]
+name = "num-bigint"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
+dependencies = [
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-conv"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+
+[[package]]
+name = "num-integer"
+version = "0.1.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "object"
+version = "0.36.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "oid-registry"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9"
+dependencies = [
+ "asn1-rs",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.20.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1"
+dependencies = [
+ "portable-atomic",
+]
+
+[[package]]
+name = "openssl-probe"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
+
+[[package]]
+name = "ordered-float"
+version = "2.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "orion-configuration"
+version = "0.1.0"
+dependencies = [
+ "base64 0.22.1",
+ "base64-serde",
+ "bytes",
+ "clap",
+ "compact_str",
+ "envoy-data-plane-api",
+ "exponential-backoff",
+ "http",
+ "http-serde-ext",
+ "humantime-serde",
+ "ipnet",
+ "num_cpus",
+ "orion-data-plane-api",
+ "orion-error",
+ "regex",
+ "serde",
+ "serde_path_to_error",
+ "serde_regex",
+ "serde_yaml",
+ "thiserror 1.0.64",
+ "tracing",
+ "tracing-subscriber",
+ "tracing-test",
+ "url",
+]
+
+[[package]]
+name = "orion-data-plane-api"
+version = "0.1.9"
+dependencies = [
+ "anyhow",
+ "async-stream",
+ "envoy-data-plane-api",
+ "futures",
+ "hyper-util",
+ "serde",
+ "serde_yaml",
+ "thiserror 1.0.64",
+ "tokio",
+ "tokio-stream",
+ "tokio-util",
+ "tower 0.5.1",
+ "tracing",
+]
+
+[[package]]
+name = "orion-error"
+version = "0.1.0"
+
+[[package]]
+name = "orion-lib"
+version = "0.1.0"
+dependencies = [
+ "abort-on-drop",
+ "async-stream",
+ "atomic-time",
+ "bytes",
+ "compact_str",
+ "enum_dispatch",
+ "exponential-backoff",
+ "futures",
+ "futures-util",
+ "h2",
+ "hickory-resolver",
+ "http",
+ "http-body",
+ "http-body-util",
+ "hyper",
+ "hyper-rustls",
+ "hyper-util",
+ "ipnet",
+ "once_cell",
+ "orion-configuration",
+ "orion-data-plane-api",
+ "orion-error",
+ "orion-xds",
+ "parking_lot",
+ "pin-project",
+ "pingora-timeout",
+ "pretty-duration",
+ "rand",
+ "regex",
+ "rustc-hash 2.1.1",
+ "rustls",
+ "rustls-pemfile",
+ "rustls-platform-verifier",
+ "rustls-webpki",
+ "serde",
+ "serde_yaml",
+ "thiserror 1.0.64",
+ "thread_local",
+ "tokio",
+ "tokio-rustls",
+ "tokio-stream",
+ "tower 0.5.1",
+ "tower-service",
+ "tracing",
+ "tracing-test",
+ "twox-hash",
+ "typed-builder",
+ "url",
+ "x509-parser",
+]
+
+[[package]]
+name = "orion-proxy"
+version = "0.1.0"
+dependencies = [
+ "abort-on-drop",
+ "affinity",
+ "caps",
+ "console-subscriber",
+ "dhat",
+ "futures",
+ "num_cpus",
+ "orion-configuration",
+ "orion-error",
+ "orion-lib",
+ "orion-xds",
+ "regex",
+ "serde",
+ "tikv-jemallocator",
+ "tokio",
+ "tracing",
+ "tracing-appender",
+ "tracing-subscriber",
+ "tracing-test",
+]
+
+[[package]]
+name = "orion-xds"
+version = "0.1.0"
+dependencies = [
+ "async-stream",
+ "atomic-take",
+ "bytes",
+ "futures",
+ "http",
+ "orion-configuration",
+ "orion-data-plane-api",
+ "orion-error",
+ "serde",
+ "serde_yaml",
+ "thiserror 2.0.12",
+ "tokio",
+ "tokio-stream",
+ "tower 0.5.1",
+ "tracing",
+ "tracing-subscriber",
+ "uuid",
+]
+
+[[package]]
+name = "overload"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
+
+[[package]]
+name = "parking_lot"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
+
+[[package]]
+name = "percent-encoding"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+
+[[package]]
+name = "petgraph"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
+dependencies = [
+ "fixedbitset",
+ "indexmap 2.6.0",
+]
+
+[[package]]
+name = "pin-project"
+version = "1.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "pingora-timeout"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56f54daa3e32391201cfabde4dc1c2ecdfa60b4d6695ef47df56f42c55792ee3"
+dependencies = [
+ "futures",
+ "once_cell",
+ "parking_lot",
+ "pin-project-lite",
+ "thread_local",
+ "tokio",
+]
+
+[[package]]
+name = "portable-atomic"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2"
+
+[[package]]
+name = "powerfmt"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
+dependencies = [
+ "zerocopy",
+]
+
+[[package]]
+name = "pretty-duration"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8868e7264af614b3634ff0abbe37b178e61000611b8a75221aea40221924aba"
+
+[[package]]
+name = "prettyplease"
+version = "0.2.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba"
+dependencies = [
+ "proc-macro2",
+ "syn",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "prost"
+version = "0.13.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f"
+dependencies = [
+ "bytes",
+ "prost-derive",
+]
+
+[[package]]
+name = "prost-build"
+version = "0.13.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15"
+dependencies = [
+ "bytes",
+ "heck",
+ "itertools 0.13.0",
+ "log",
+ "multimap",
+ "once_cell",
+ "petgraph",
+ "prettyplease",
+ "prost",
+ "prost-types",
+ "regex",
+ "syn",
+ "tempfile",
+]
+
+[[package]]
+name = "prost-derive"
+version = "0.13.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5"
+dependencies = [
+ "anyhow",
+ "itertools 0.13.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "prost-reflect"
+version = "0.14.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b7535b02f0e5efe3e1dbfcb428be152226ed0c66cad9541f2274c8ba8d4cd40"
+dependencies = [
+ "base64 0.22.1",
+ "once_cell",
+ "prost",
+ "prost-reflect-derive",
+ "prost-types",
+ "serde",
+ "serde-value",
+]
+
+[[package]]
+name = "prost-reflect-derive"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4fce6b22f15cc8d8d400a2b98ad29202b33bd56c7d9ddd815bc803a807ecb65"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "prost-types"
+version = "0.13.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670"
+dependencies = [
+ "prost",
+]
+
+[[package]]
+name = "quick-error"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata 0.4.8",
+ "regex-syntax 0.8.5",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+dependencies = [
+ "regex-syntax 0.6.29",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax 0.8.5",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+
+[[package]]
+name = "resolv-conf"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00"
+dependencies = [
+ "hostname",
+ "quick-error",
+]
+
+[[package]]
+name = "ring"
+version = "0.17.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "getrandom",
+ "libc",
+ "spin",
+ "untrusted",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc-hash"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
+
+[[package]]
+name = "rusticata-macros"
+version = "4.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632"
+dependencies = [
+ "nom",
+]
+
+[[package]]
+name = "rustix"
+version = "0.38.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "rustls"
+version = "0.23.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8"
+dependencies = [
+ "aws-lc-rs",
+ "log",
+ "once_cell",
+ "rustls-pki-types",
+ "rustls-webpki",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rustls-native-certs"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5"
+dependencies = [
+ "openssl-probe",
+ "rustls-pemfile",
+ "rustls-pki-types",
+ "schannel",
+ "security-framework",
+]
+
+[[package]]
+name = "rustls-native-certs"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a"
+dependencies = [
+ "openssl-probe",
+ "rustls-pemfile",
+ "rustls-pki-types",
+ "schannel",
+ "security-framework",
+]
+
+[[package]]
+name = "rustls-pemfile"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
+dependencies = [
+ "rustls-pki-types",
+]
+
+[[package]]
+name = "rustls-pki-types"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55"
+
+[[package]]
+name = "rustls-platform-verifier"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afbb878bdfdf63a336a5e63561b1835e7a8c91524f51621db870169eac84b490"
+dependencies = [
+ "core-foundation",
+ "core-foundation-sys",
+ "jni",
+ "log",
+ "once_cell",
+ "rustls",
+ "rustls-native-certs 0.7.3",
+ "rustls-platform-verifier-android",
+ "rustls-webpki",
+ "security-framework",
+ "security-framework-sys",
+ "webpki-roots",
+ "winapi",
+]
+
+[[package]]
+name = "rustls-platform-verifier-android"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
+
+[[package]]
+name = "rustls-webpki"
+version = "0.102.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9"
+dependencies = [
+ "aws-lc-rs",
+ "ring",
+ "rustls-pki-types",
+ "untrusted",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
+
+[[package]]
+name = "ryu"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "schannel"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+
+[[package]]
+name = "security-framework"
+version = "2.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
+dependencies = [
+ "bitflags",
+ "core-foundation",
+ "core-foundation-sys",
+ "libc",
+ "num-bigint",
+ "security-framework-sys",
+]
+
+[[package]]
+name = "security-framework-sys"
+version = "2.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.210"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde-value"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c"
+dependencies = [
+ "ordered-float",
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.210"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.128"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_path_to_error"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
+dependencies = [
+ "itoa",
+ "serde",
+]
+
+[[package]]
+name = "serde_regex"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8136f1a4ea815d7eac4101cfd0b16dc0cb5e1fe1b8609dfd728058656b7badf"
+dependencies = [
+ "regex",
+ "serde",
+]
+
+[[package]]
+name = "serde_yaml"
+version = "0.9.34+deprecated"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
+dependencies = [
+ "indexmap 2.6.0",
+ "itoa",
+ "ryu",
+ "serde",
+ "unsafe-libyaml",
+]
+
+[[package]]
+name = "sharded-slab"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
+dependencies = [
+ "lazy_static",
+]
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
+name = "signal-hook-registry"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
+
+[[package]]
+name = "socket2"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
+dependencies = [
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "spin"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
+[[package]]
+name = "strsim"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
+
+[[package]]
+name = "subtle"
+version = "2.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
+
+[[package]]
+name = "syn"
+version = "2.0.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "sync_wrapper"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
+
+[[package]]
+name = "sync_wrapper"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394"
+
+[[package]]
+name = "synstructure"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b"
+dependencies = [
+ "cfg-if",
+ "fastrand",
+ "once_cell",
+ "rustix",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84"
+dependencies = [
+ "thiserror-impl 1.0.64",
+]
+
+[[package]]
+name = "thiserror"
+version = "2.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
+dependencies = [
+ "thiserror-impl 2.0.12",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "2.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "thousands"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820"
+
+[[package]]
+name = "thread_local"
+version = "1.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+]
+
+[[package]]
+name = "tikv-jemalloc-sys"
+version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
+name = "tikv-jemallocator"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865"
+dependencies = [
+ "libc",
+ "tikv-jemalloc-sys",
+]
+
+[[package]]
+name = "time"
+version = "0.3.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+dependencies = [
+ "deranged",
+ "itoa",
+ "num-conv",
+ "powerfmt",
+ "serde",
+ "time-core",
+ "time-macros",
+]
+
+[[package]]
+name = "time-core"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
+
+[[package]]
+name = "time-macros"
+version = "0.2.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+dependencies = [
+ "num-conv",
+ "time-core",
+]
+
+[[package]]
+name = "tinyvec"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938"
+dependencies = [
+ "tinyvec_macros",
+]
+
+[[package]]
+name = "tinyvec_macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
+
+[[package]]
+name = "tokio"
+version = "1.40.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
+dependencies = [
+ "backtrace",
+ "bytes",
+ "libc",
+ "mio",
+ "parking_lot",
+ "pin-project-lite",
+ "signal-hook-registry",
+ "socket2", + "tokio-macros", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +dependencies = [ + "rustls", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn", +] + +[[package]] +name = "tonic-health" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1eaf34ddb812120f5c601162d5429933c9b527d901ab0e7f930d3147e33a09b2" +dependencies = [ + "async-stream", + "prost", + "tokio", + "tokio-stream", + "tonic", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = 
"0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror 1.0.64", + "time", + "tracing-subscriber", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "rand", + "static_assertions", +] + +[[package]] +name = "typed-builder" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77739c880e00693faef3d65ea3aad725f196da38b22fdc7ea6ded6e1ce4d3add" +dependencies = [ + "typed-builder-macro", +] + +[[package]] +name = "typed-builder-macro" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f718dfaf347dcb5b983bfc87608144b0bad87970aebcbea5ce44d2a30c08e63" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" + +[[package]] +name = "unicode-normalization" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +dependencies = [ + "form_urlencoded", + "idna 0.5.0", + "percent-encoding", +] + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +dependencies = [ + "getrandom", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "webpki-roots" +version = "0.26.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + +[[package]] +name = "widestring" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror 1.0.64", + "time", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000..2a93bea0 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,122 @@ +[workspace] +members = [ + "envoy-data-plane-api", + "orion-configuration", + "orion-data-plane-api", + "orion-error", + "orion-lib", + "orion-proxy", + "orion-xds", +] +resolver = "2" + +[workspace.dependencies] +envoy-data-plane-api = { path = 
"envoy-data-plane-api" } +orion-configuration = { path = "orion-configuration" } +orion-data-plane-api = { path = "orion-data-plane-api" } +orion-error = { path = "orion-error" } +orion-lib = { path = "orion-lib" } +orion-xds = { path = "orion-xds" } + + +abort-on-drop = "0.2" +bytes = "1" +compact_str = { version = "0.8.0", features = ["serde"] } +enum_dispatch = "0.3.13" +exponential-backoff = "1.2.0" +futures = "0.3" +http = "1.0" +http-body = "1.0" +http-body-util = "0.1.0" +hyper-util = { version = "0.1.3", features = ["full"] } +num_cpus = "1" +parking_lot = "0.12.3" +prost = "0.13" +prost-build = "^0.13" +prost-reflect = { version = "0.14", features = ["derive", "serde"] } +regex = "1.10.2" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_yaml = "0.9.34" +thiserror = "1.0.57" +tokio = { version = "1.37", features = ["full"] } +tokio-stream = { version = "0.1.17", features = ["net", "sync"] } +tonic = "^0.12" +tonic-build = "^0.12" +tonic-health = "^0.12" +tower = { version = "0.5.1", features = ["make"] } +tracing = "0.1" +tracing-subscriber = "0.3" +tracing-test = { version = "0.2", features = ["no-env-filter"] } +url = "2" + + +[profile.release] +codegen-units = 1 +debug = false +debug-assertions = false +incremental = false +lto = true +opt-level = 3 +overflow-checks = false +panic = 'unwind' +rpath = false +strip = "debuginfo" + +[profile.release-debuginfo] +debug = "full" +inherits = "release" +split-debuginfo = "packed" +strip = false + +[profile.release-dhat] +debug = 1 +inherits = "release" + +[workspace.lints.clippy] +correctness = { level = "deny", priority = 10 } +disallowed-methods = "deny" +expect_used = "warn" # FIXME this should be deny +panic = "warn" # FIXME this should be deny +todo = "deny" +transmute_ptr_to_ptr = "deny" +unchecked_duration_subtraction = "deny" +unused_async = "deny" +unwrap_used = "deny" + +# A subset of pedantic as warn +cast_lossless = "warn" +cast_possible_truncation = "warn" +cast_possible_wrap = "warn" +cast_precision_loss = "warn" +cast_ptr_alignment = "warn" +cast_sign_loss = "warn" +checked_conversions = "warn" +if_then_some_else_none = "warn" +inconsistent_struct_constructor = "allow" +invalid_upcast_comparisons = "warn" +large_futures = "warn" +large_stack_arrays = "warn" +large_types_passed_by_value = "warn" +map_unwrap_or = "allow" +maybe_infinite_iter = "warn" +missing_errors_doc = "allow" +missing_panics_doc = "allow" +doc_markdown = "allow" +module_name_repetitions = "allow" +must_use_candidate = "allow" +needless_raw_string_hashes = "allow" +pedantic = { level = "warn", priority = -1 } +print_stderr = "warn" +print_stdout = "warn" +ptr_cast_constness = "warn" +range_minus_one = "warn" +range_plus_one = "warn" +redundant_closure_for_method_calls = "warn" +return_self_not_must_use = "warn" +same_functions_in_if_condition = "warn" +semicolon-if-nothing-returned = "allow" +similar_names = "warn" +str_to_string = "warn" +string_to_string = "warn" +unicode_not_nfc = "warn" diff --git a/README.md b/README.md new file mode 100644 index 00000000..782ddac8 --- /dev/null +++ b/README.md @@ -0,0 +1,52 @@ +orion-proxy-logo + + + +## Introduction + +Orion Proxy is a high performance and memory safe implementation of popular [Envoy Proxy](https://www.envoyproxy.io/). Orion Proxy is implemented in Rust using high-quality open source components. 
+ +### Key features + +**Memory Safety** + +The Rust programming language eliminates whole classes of bugs related to memory management and data races, making Orion Proxy a robust and secure application. + + +**Performance** + +Orion Proxy offers 2x-4x higher throughput and lower latency than Envoy Proxy. Refer to [Performance](docs/performance/performance.md) for performance figures and details of how we tested Orion Proxy. + + +**Compatibility** + +Orion Proxy configuration is generated from Envoy's xDS protobuf definitions. Orion Proxy aims to be a drop-in replacement for Envoy. + + + +## Quick Start + +### Building +```console +git clone https://github.com/kmesh-net/orion +cd orion +git submodule init +git submodule update --force +cargo build +``` + + +### Running +```console +cargo run --bin orion -- --config orion/conf/orion-runtime.yaml +``` + + + + +## License + +Orion Proxy is licensed under the +[Apache License, Version 2.0](./LICENSE). diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md new file mode 100644 index 00000000..13f096bd --- /dev/null +++ b/RELEASE-NOTES.md @@ -0,0 +1,7 @@ +## [0.1.0] + +> First public release of Orion Proxy. + +> [!CAUTION] +> This project is still very unstable and not ready for use in production environments. + diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 00000000..106a4e09 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,15 @@ +# for `disallowed_method`: +# https://rust-lang.github.io/rust-clippy/master/index.html#disallowed_method +disallowed-methods = [ + { path = "orion_data_plane_api::decode::SourceFmt::Protobuf", reason = "Should not be used anymore. Use TryFrom instead." }, + { path = "orion_data_plane_api::decode::SourceFmt::Yaml", reason = "Should not be used anymore. Use TryFrom instead." }, + { path = "tokio::time::timeout", reason = "Use pingora_timeout::fast_timeout::fast_timeout instead" }, +] + +# The maximum number of function parameters is 5. +too-many-arguments-threshold = 5 + +allow-expect-in-tests = true +allow-print-in-tests = true +allow-unwrap-in-tests = true +check-private-items = false diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..b255629f --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,43 @@ +FROM rust:1.84 AS orion-builder + +#RUN rustup component add rustfmt + +WORKDIR /tmp/orion + +RUN < + +#### Latency in microseconds +[chart: latency in microseconds (wrk)] + +### HTTPS +#### Requests per second +[chart: requests per second (wrk)] + +#### Latency in microseconds +[chart: latency in microseconds (wrk)] + +### TCP +#### Requests per second +[chart: requests per second (wrk)] + +#### Latency in microseconds +[chart: latency in microseconds (wrk)] + +## Test Methodology and Testbed +[diagram: testbed configuration] + + +* Requests generator (wrk): + - 2048 and 16384 connections tests + - 45s duration per test + - 36 threads/18 cores for the application on NUMA0 + - 6 cores assigned to IRQ handling on NUMA0 + - Measures req/s, throughput, latency (min, max, mean, 99.9th, 99.999th) +* Upstream Cluster (Nginx): + - In-memory direct response of 12B size + - 17 cores assigned to the application on NUMA1 + - 7 cores assigned to IRQ handling on NUMA1 + - 2.6M RPS is a hard limit of the server + - Measures #connections established upstream + +* DUT (Proxy): + - Comparison of Envoy, Ng-proxy, and Nginx + - Scaling tested with 1, 2, 4, 8, 16, 24, 32 cores assigned to the proxy + - TCP termination toward a single upstream cluster + +* DUT (Tunings): + - IRQ affinity + - 8 cores reserved for IRQ affinity. + - 24 cores allocated for the proxy (per NUMA node).
+* All interfaces are 25Gbps Mellanox and Intel NICs diff --git a/docs/pics/logo/orion_proxy_logo.png b/docs/pics/logo/orion_proxy_logo.png new file mode 100644 index 00000000..83887e3d Binary files /dev/null and b/docs/pics/logo/orion_proxy_logo.png differ diff --git a/docs/pics/logo/orion_proxy_logo.xcf b/docs/pics/logo/orion_proxy_logo.xcf new file mode 100644 index 00000000..141b69f9 Binary files /dev/null and b/docs/pics/logo/orion_proxy_logo.xcf differ diff --git a/docs/pics/orion.svg b/docs/pics/orion.svg new file mode 100644 index 00000000..914d91e4 --- /dev/null +++ b/docs/pics/orion.svg @@ -0,0 +1,4 @@ + + + +
[orion.svg: architecture diagram. Text labels: Tokio runtime(s) hosting a Listener Manager and a Cluster Manager, each with its own event loop; a Cluster holds a Load Assignment, Load Balancer, and Endpoints; a Listener holds Filterchains with an HttpManager filter (a service) or a TcpProxy filter over TCP, TLS, and HTTP/1.1 or HTTP/2, from downstream to upstream; a Hyper server consumes downstream streams and needs a service to process them, while a Hyper client is a service that creates upstream streams; listener configuration channel(s) carry listener add/remove, TLS (filterchain), and route (HttpManager/Route) updates, and cluster configuration channel(s) carry cluster add/remove, load-assignment changes, and TLS (cluster/endpoint) updates; a cluster selection channel selects a cluster and endpoint and returns a service to manage connectivity; a separate Tokio runtime handles xDS over gRPC/Tonic, fed by a static bootstrap and an external configuration service (xDS) delivering listener, cluster, route, and TLS secret updates.]
\ No newline at end of file diff --git a/docs/pics/performance/baseline/wrk-latency.png b/docs/pics/performance/baseline/wrk-latency.png new file mode 100644 index 00000000..6dfcc765 Binary files /dev/null and b/docs/pics/performance/baseline/wrk-latency.png differ diff --git a/docs/pics/performance/baseline/wrk-reqsec.png b/docs/pics/performance/baseline/wrk-reqsec.png new file mode 100644 index 00000000..ba55bece Binary files /dev/null and b/docs/pics/performance/baseline/wrk-reqsec.png differ diff --git a/docs/pics/performance/tcp/wrk-latency.png b/docs/pics/performance/tcp/wrk-latency.png new file mode 100644 index 00000000..49f6abb8 Binary files /dev/null and b/docs/pics/performance/tcp/wrk-latency.png differ diff --git a/docs/pics/performance/tcp/wrk-reqsec.png b/docs/pics/performance/tcp/wrk-reqsec.png new file mode 100644 index 00000000..82cdb520 Binary files /dev/null and b/docs/pics/performance/tcp/wrk-reqsec.png differ diff --git a/docs/pics/performance/testbed_conf.png b/docs/pics/performance/testbed_conf.png new file mode 100644 index 00000000..8b4c9837 Binary files /dev/null and b/docs/pics/performance/testbed_conf.png differ diff --git a/docs/pics/performance/tls/wrk-latency.png b/docs/pics/performance/tls/wrk-latency.png new file mode 100644 index 00000000..a93f0a3a Binary files /dev/null and b/docs/pics/performance/tls/wrk-latency.png differ diff --git a/docs/pics/performance/tls/wrk-reqsec.png b/docs/pics/performance/tls/wrk-reqsec.png new file mode 100644 index 00000000..8aab8ae8 Binary files /dev/null and b/docs/pics/performance/tls/wrk-reqsec.png differ diff --git a/docs/videos/orion-demo-lb.webm b/docs/videos/orion-demo-lb.webm new file mode 100644 index 00000000..b9eacf0c Binary files /dev/null and b/docs/videos/orion-demo-lb.webm differ diff --git a/envoy-data-plane-api/Cargo.toml b/envoy-data-plane-api/Cargo.toml new file mode 100644 index 00000000..aac3019d --- /dev/null +++ b/envoy-data-plane-api/Cargo.toml @@ -0,0 +1,25 @@ +[package] +description = "Envoy xDS protobuf and gRPC definitions" +edition = "2021" +license = "MIT" +name = "envoy-data-plane-api" +version = "0.1.9" + +[dependencies] +prost.workspace = true +prost-reflect = { version = "0.14", features = ["derive", "serde"] } +serde.workspace = true +serde_json.workspace = true +serde_yaml.workspace = true +tonic = "^0.12" +tonic-health = "^0.12" + +[build-dependencies] +glob = "^0.3" +prost-build.workspace = true +prost-reflect = { version = "0.14", features = ["derive", "serde"] } +tonic-build.workspace = true + + +[lib] +doctest = false diff --git a/envoy-data-plane-api/build.rs b/envoy-data-plane-api/build.rs new file mode 100644 index 00000000..fe472788 --- /dev/null +++ b/envoy-data-plane-api/build.rs @@ -0,0 +1,54 @@ +use std::path::PathBuf; + +use glob::glob; + +// To use a custom protoc, set the PROTOC environment variable, e.g. std::env::set_var("PROTOC", "<path to protoc>"); +fn main() -> std::io::Result<()> { + let descriptor_path = PathBuf::from(std::env::var("OUT_DIR").unwrap()).join("proto_descriptor.bin"); + let protos: Vec<PathBuf> = glob("data-plane-api/envoy/**/v3/*.proto").unwrap().filter_map(Result::ok).collect(); + + let include_paths = [ + "data-plane-api/", + "xds/", + "protoc-gen-validate/", + "googleapis/", + "opencensus-proto/src/", + "opentelemetry-proto/", + "prometheus-client-model/", + "cel-spec/proto", + ]; + + let mut config = prost_build::Config::new(); + config + .file_descriptor_set_path(descriptor_path.clone()) + .enable_type_names() + .compile_well_known_types() + .include_file("mod.rs"); + + // this is the same as
prost_reflect_build::Builder::configure but we + // cannot use it due to different versions of prost_build in dependencies + // + // This requires our crate to provide FILE_DESCRIPTOR_SET_BYTES as an include_bytes! + // of descriptor_path + config.compile_protos(&protos, &include_paths)?; + let pool_attribute = r#"#[prost_reflect(file_descriptor_set_bytes = "crate::FILE_DESCRIPTOR_SET_BYTES")]"#; + + let buf = std::fs::read(&descriptor_path)?; + let descriptor = prost_reflect::DescriptorPool::decode(buf.as_ref()).expect("Invalid file descriptor"); + for message in descriptor.all_messages() { + let full_name = message.full_name(); + config + .type_attribute(full_name, "#[derive(::prost_reflect::ReflectMessage)]") + .type_attribute(full_name, format!(r#"#[prost_reflect(message_name = "{}")]"#, full_name,)) + .type_attribute(full_name, pool_attribute); + } + + // Proceed w/ tonic_build + tonic_build::configure().build_server(true).build_client(true).compile_with_config( config, &protos, &include_paths, )?; + + Ok(()) +} diff --git a/envoy-data-plane-api/cel-spec b/envoy-data-plane-api/cel-spec new file mode 160000 index 00000000..a8f582aa --- /dev/null +++ b/envoy-data-plane-api/cel-spec @@ -0,0 +1 @@ +Subproject commit a8f582aae6a65b5c417c0ab7d22aab68f41ec4b2 diff --git a/envoy-data-plane-api/data-plane-api b/envoy-data-plane-api/data-plane-api new file mode 160000 index 00000000..9a873de7 --- /dev/null +++ b/envoy-data-plane-api/data-plane-api @@ -0,0 +1 @@ +Subproject commit 9a873de7a976b7e51d6357066d799f2ac76ae88b diff --git a/envoy-data-plane-api/googleapis b/envoy-data-plane-api/googleapis new file mode 160000 index 00000000..29c240bc --- /dev/null +++ b/envoy-data-plane-api/googleapis @@ -0,0 +1 @@ +Subproject commit 29c240bcc463751f6224deb048f3332bddf4c3ef diff --git a/envoy-data-plane-api/opencensus-proto b/envoy-data-plane-api/opencensus-proto new file mode 160000 index 00000000..1664cc96 --- /dev/null +++ b/envoy-data-plane-api/opencensus-proto @@ -0,0 +1 @@ +Subproject commit 1664cc961550be8f3058ddd29390350242f44f1f diff --git a/envoy-data-plane-api/opentelemetry-proto b/envoy-data-plane-api/opentelemetry-proto new file mode 160000 index 00000000..2dd1a231 --- /dev/null +++ b/envoy-data-plane-api/opentelemetry-proto @@ -0,0 +1 @@ +Subproject commit 2dd1a231f91a9d5547816b12d166ac2aa5a8fc0c diff --git a/envoy-data-plane-api/prometheus-client-model b/envoy-data-plane-api/prometheus-client-model new file mode 160000 index 00000000..96baa00f --- /dev/null +++ b/envoy-data-plane-api/prometheus-client-model @@ -0,0 +1 @@ +Subproject commit 96baa00f5eae5e872f10e34ee1c27ebb498feb7a diff --git a/envoy-data-plane-api/protoc-gen-validate b/envoy-data-plane-api/protoc-gen-validate new file mode 160000 index 00000000..12e179b1 --- /dev/null +++ b/envoy-data-plane-api/protoc-gen-validate @@ -0,0 +1 @@ +Subproject commit 12e179b1f96e4c07c83917f421ce3bb62b811f11 diff --git a/envoy-data-plane-api/src/lib.rs b/envoy-data-plane-api/src/lib.rs new file mode 100644 index 00000000..5caf30b8 --- /dev/null +++ b/envoy-data-plane-api/src/lib.rs @@ -0,0 +1,212 @@ +#![allow(clippy::all)] + +pub use prost; +pub use prost_reflect; +pub use tonic; +pub use tonic_health; + +// This needs to match the file descriptor path from build.rs +/// The binary contents of the protobuf descriptor +pub const FILE_DESCRIPTOR_SET_BYTES: &'static [u8] = include_bytes!(concat!(env!("OUT_DIR"), "/proto_descriptor.bin")); + +include!(concat!(env!("OUT_DIR"), "/mod.rs")); + +#[test] +fn test_json_any_decode() { + use
crate::envoy::config::bootstrap::v3::Bootstrap; + use crate::envoy::config::core::v3::data_source::Specifier; + use crate::envoy::config::core::v3::transport_socket::ConfigType; + use crate::envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext; + use prost::{Message, Name}; + + const BOOTSTRAP_JSON: &str = r#"{ + "staticResources": { + "listeners": [ + { + "name": "server-1", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9000 + } + }, + "filterChains": [ + { + "filters": [], + "transportSocket": { + "name": "envoy.transport_sockets.tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "requireClientCertificate": true, + "commonTlsContext": { + "tlsParams": { + "tlsMinimumProtocolVersion": "TLSv1_3", + "tlsMaximumProtocolVersion": "TLSv1_3" + }, + "validationContext": { + "trustedCa": { + "filename": "./certs/ca.crt" + }, + "matchTypedSubjectAltNames": [ + { + "sanType": "DNS", + "matcher": { + "exact": "client.test" + } + } + ] + }, + "tlsCertificates": [ + { + "certificateChain": { + "filename": "./certs/server.test.ecdsa-p256.crt" + }, + "privateKey": { + "filename": "./certs/server.test.ecdsa-p256.key" + } + } + ] + } + } + } + } + ] + } + ] + } +} + "#; + + let pool = prost_reflect::DescriptorPool::decode(crate::FILE_DESCRIPTOR_SET_BYTES).unwrap(); + let message_descriptor = pool.get_message_by_name(&Bootstrap::full_name()).unwrap(); + let mut deserializer = serde_json::de::Deserializer::from_str(BOOTSTRAP_JSON); + let bootstrap_dyn = prost_reflect::DynamicMessage::deserialize(message_descriptor, &mut deserializer).unwrap(); + deserializer.end().unwrap(); + let bootstrap: Bootstrap = bootstrap_dyn.transcode_to().unwrap(); + + let bootstrap_tls_config = bootstrap.static_resources.as_ref().unwrap().listeners[0].filter_chains[0] + .transport_socket + .as_ref() + .unwrap() + .config_type + .as_ref() + .unwrap(); + let ConfigType::TypedConfig(tls_any) = bootstrap_tls_config; + if tls_any.type_url == "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" { + let tls_ctx_map = DownstreamTlsContext::decode(tls_any.value.as_slice()).unwrap(); + let ctx = tls_ctx_map.common_tls_context.unwrap(); + assert_eq!(ctx.tls_params.unwrap().tls_maximum_protocol_version, 4); + assert_eq!( + *ctx.tls_certificates[0].private_key.as_ref().unwrap().specifier.as_ref().unwrap(), + Specifier::Filename("./certs/server.test.ecdsa-p256.key".to_string()) + ); + } +} + +#[test] +fn test_yaml_any_decode() { + use crate::envoy::config::bootstrap::v3::Bootstrap; + use crate::envoy::config::listener::v3::filter::ConfigType; + use crate::envoy::config::route::v3::route_match::PathSpecifier; + use crate::envoy::extensions::filters::network::http_connection_manager::v3::http_connection_manager::RouteSpecifier; + use crate::envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager; + use prost::{Message, Name}; + + const BOOTSTRAP_YAML: &str = r#" +--- +admin: + address: + socketAddress: + address: "127.0.0.1" + portValue: 9901 +node: + id: envoy-test-1 + cluster: envoy-test-cluster-1 +staticResources: + listeners: + - name: server-1 + address: + socketAddress: + address: 127.0.0.1 + portValue: 9000 + filterChains: + - filters: + - name: envoy.filters.network.http_connection_manager + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + httpFilters: + 
- name: envoy.filters.http.router + routeConfig: + name: local_route + virtualHosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + route: + cluster: local-srv + transportSocket: + name: envoy.transport_sockets.tls + typedConfig: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + requireClientCertificate: true + commonTlsContext: + tlsParams: + tlsMinimumProtocolVersion: TLSv1_3 + tlsMaximumProtocolVersion: TLSv1_3 + validationContext: + trustedCa: + filename: ./certs/ca.crt + matchTypedSubjectAltNames: + - sanType: DNS + matcher: + exact: client.test + tlsCertificates: + - certificateChain: + filename: ./certs/server.test.ecdsa-p256.crt + privateKey: + filename: ./certs/server.test.ecdsa-p256.key + clusters: + - name: local-srv + type: STATIC + lbPolicy: ROUND_ROBIN + loadAssignment: + clusterName: local-srv + endpoints: + - lbEndpoints: + - endpoint: + address: + socketAddress: + address: "127.0.0.1" + portValue: 9110 + "#; + + let pool = prost_reflect::DescriptorPool::decode(crate::FILE_DESCRIPTOR_SET_BYTES).unwrap(); + let message_descriptor = pool.get_message_by_name(&Bootstrap::full_name()).unwrap(); + let deserializer = serde_yaml::Deserializer::from_str(BOOTSTRAP_YAML); + let bootstrap_dyn = prost_reflect::DynamicMessage::deserialize(message_descriptor, deserializer).unwrap(); + let bootstrap: Bootstrap = bootstrap_dyn.transcode_to().unwrap(); + + let config_type = bootstrap.static_resources.as_ref().unwrap().listeners[0].filter_chains[0].filters[0] + .config_type + .as_ref() + .unwrap(); + + if let ConfigType::TypedConfig(http_manager_any) = config_type { + if http_manager_any.type_url + == "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + { + let http_manager_map = HttpConnectionManager::decode(http_manager_any.value.as_slice()).unwrap(); + + let Some(RouteSpecifier::RouteConfig(route_config)) = http_manager_map.route_specifier else { + panic!("Expecting route specifier to hold route configuration: {:?}", http_manager_map.route_specifier); + }; + + assert_eq!(route_config.name, "local_route"); + let route_match = route_config.virtual_hosts[0].routes[0].r#match.as_ref().unwrap(); + assert_eq!(*route_match.path_specifier.as_ref().unwrap(), PathSpecifier::Prefix("/".to_string())); + } + } +} diff --git a/envoy-data-plane-api/xds b/envoy-data-plane-api/xds new file mode 160000 index 00000000..2ac532fd --- /dev/null +++ b/envoy-data-plane-api/xds @@ -0,0 +1 @@ +Subproject commit 2ac532fd44436293585084f8d94c6bdb17835af0 diff --git a/orion-configuration/Cargo.toml b/orion-configuration/Cargo.toml new file mode 100644 index 00000000..8f9f2e6a --- /dev/null +++ b/orion-configuration/Cargo.toml @@ -0,0 +1,42 @@ +[package] +edition = "2021" +name = "orion-configuration" +publish = ["rust-inhuawei-com"] +version = "0.1.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +base64 = "0.22.1" +base64-serde = "0.7.0" +bytes.workspace = true +clap = { version = "4.4.8", features = ["derive"] } +compact_str.workspace = true +envoy-data-plane-api = { workspace = true, optional = true } +exponential-backoff.workspace = true +http.workspace = true +http-serde-ext = "1.0.2" +humantime-serde = "1.1.1" +ipnet = { version = "2.9", features = ["serde"] } +num_cpus.workspace = true +orion-data-plane-api = { workspace = true, optional = true } +orion-error.workspace = true +regex.workspace = true +serde = { 
workspace = true, features = ["rc"] } +serde_path_to_error = "0.1.16" +serde_regex = "1.1.0" +serde_yaml.workspace = true +thiserror.workspace = true +tracing.workspace = true +tracing-subscriber = { workspace = true, features = ["env-filter", "std"] } +url.workspace = true + +[dev-dependencies] +tracing-test.workspace = true + +[features] +default = ["envoy-conversions"] +envoy-conversions = ["dep:orion-data-plane-api"] + +[lints] +workspace = true diff --git a/orion-configuration/bootstrap.yaml b/orion-configuration/bootstrap.yaml new file mode 100644 index 00000000..6a686d88 --- /dev/null +++ b/orion-configuration/bootstrap.yaml @@ -0,0 +1,137 @@ + + staticResources: + listeners: + - name: listener-1 + listener_filters: + ##### Envoy needs it to pass SNI to select different filterchains + - name: listener_https_1_tls_inspector + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector + enable_ja3_fingerprinting: false + address: + socketAddress: + address: 127.0.0.1 + portValue: 8000 + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_con_man + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + statPrefix: ingress_http + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + routeConfig: + name: local_route + virtualHosts: + - name: local_service + domains: ["*"] # for some reason hyper returns host 127.0.0.1 for https and host none for http + request_headers_to_add: + - header: + key: "my-custom-header" + value: "value" + routes: + - match: + prefix: "/dr" + direct_response: + status: "200" + body: + inline_string: "This is a direct response" + - match: + prefix: "/" + route: + cluster: cluster1 + + + # route: + # cluster: cluster2 + # - name: listener-2 + # address: + # socketAddress: + # address: 192.168.1.1 + # portValue: 8081 + # filterChains: + # - filters: + # - name: envoy.filters.network.http_connection_manager + # typedConfig: + # "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + # statPrefix: ingress_http + # httpFilters: + # - name: envoy.filters.http.router + # routeConfig: + # name: local_route + # virtualHosts: + # - name: local_service + # domains: ["*"] + # routes: + # - match: + # prefix: "/proxy/" + # route: + # cluster: cluster3 + + + clusters: + - name: cluster1 + # type: STATIC + lbPolicy: ROUND_ROBIN + loadAssignment: + clusterName: cluster1 + endpoints: + - lbEndpoints: + - endpoint: + address: + socketAddress: + address: "127.0.0.1" + portValue: 5454 + # transport_socket: + # name: envoy.transport_sockets.tls + # typed_config: + # '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + # sni: "backend.localhost" + # common_tls_context: + # validation_context_sds_secret_config: + # name: demo_ca_validation + # - name: cluster2 + # type: STATIC + # lbPolicy: LEAST_REQUEST + # loadAssignment: + # clusterName: cluster2 + # endpoints: + # - lbEndpoints: + # - endpoint: + # address: + # socketAddress: + # address: "127.0.0.1" + # portValue: 80 + + # - name: cluster3 + # type: STATIC + # lbPolicy: ROUND_ROBIN + # loadAssignment: + # clusterName: cluster3 + # endpoints: + # - lbEndpoints: + # - endpoint: + # address: + # socketAddress: + # address: 
"192.168.2.10" + # portValue: 81 + # http2_protocol_options: + # max_concurrent_streams: 1000 + # typed_extension_protocol_options: + # envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + # "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + # upstream_http_protocol_options: + # auto_sni: true + # common_http_protocol_options: + # idle_timeout: 1s + # explicit_http_config: + # http2_protocol_options: + # max_concurrent_streams: 100 + diff --git a/orion-configuration/examples/convert.rs b/orion-configuration/examples/convert.rs new file mode 100644 index 00000000..6dbad453 --- /dev/null +++ b/orion-configuration/examples/convert.rs @@ -0,0 +1,17 @@ +#![allow(clippy::print_stdout)] +use orion_configuration::config::Bootstrap; +use orion_error::{Result, ResultExtension}; +use std::fs::File; + +fn main() -> Result<()> { + let bootstrap = + Bootstrap::deserialize_from_envoy(File::open("bootstrap.yaml").context("failed to open bootstrap.yaml")?) + .context("failed to convert envoy to orion")?; + let yaml = serde_yaml::to_string(&bootstrap).context("failed to serialize orion")?; + std::fs::write("orion.yaml", yaml.as_bytes())?; + let bootstrap: Bootstrap = serde_yaml::from_reader(File::open("orion.yaml").context("failed to open orion.yaml")?) + .context("failed to read yaml from file")?; + let yaml = serde_yaml::to_string(&bootstrap).context("failed to round-trip serialize orion")?; + println!("{yaml}"); + Ok(()) +} diff --git a/orion-configuration/ng.yaml b/orion-configuration/ng.yaml new file mode 100644 index 00000000..dce95c16 --- /dev/null +++ b/orion-configuration/ng.yaml @@ -0,0 +1,36 @@ +static_resources: + listeners: + - name: listener-1 + address: 127.0.0.1:8000 + filter_chains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + terminal_filter: !Http + codec_type: Auto + route_specifier: + name: local_route + virtual_hosts: + - name: local_service + domains: + - '*' + routes: + - route_match: + Prefix: /dr + case_sensitive: true + DirectResponse: + status: 200 + InlineString: This is a direct response + - route_match: + Prefix: / + case_sensitive: true + Route: + cluster_specifier: cluster1 + cluster_not_found_response_code: 503 + request_headers_to_add: + - header: + key: my-custom-header + value: value + append_action: AppendIfExistsOrAdd + keep_empty_value: false + with_tls_inspector: true diff --git a/orion-configuration/src/config.rs b/orion-configuration/src/config.rs new file mode 100644 index 00000000..572af746 --- /dev/null +++ b/orion-configuration/src/config.rs @@ -0,0 +1,210 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// + +pub mod bootstrap; +pub use bootstrap::Bootstrap; +pub mod cluster; +pub use cluster::Cluster; +pub mod core; +pub mod listener; +pub use listener::Listener; +pub mod listener_filters; +pub mod log; +pub use log::Log; +pub mod network_filters; +pub mod runtime; +pub use runtime::Runtime; +pub mod common; +pub mod secret; +pub mod transport; + +pub(crate) mod util; + +pub use crate::config::common::*; +use crate::{options::Options, Result}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use std::{fs::File, path::Path}; + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct Config { + #[serde(skip_serializing_if = "is_default", default)] + pub runtime: Runtime, + #[serde(skip_serializing_if = "is_default", default)] + pub logging: Log, + #[serde(skip_serializing_if = "is_default", default)] + pub bootstrap: Bootstrap, +} + +impl Config { + fn apply_options(self, opt: &Options) -> Self { + let runtime = self.runtime.update_from_env_and_options(opt); + let max_cpus = num_cpus::get(); + if runtime.num_cpus() > max_cpus { + tracing::warn!(max_cpus, ORION_GATEWAY_CORES = runtime.num_cpus(), "Requested more cores than available CPUs"); + } + if runtime.num_runtimes() > runtime.num_cpus() { + tracing::warn!( + runtime.num_cpus, + ORION_GATEWAY_RUNTIMES = runtime.num_runtimes(), + "Requested more runtimes than cores" + ); + } + Self { runtime, ..self } + } + + #[cfg(not(feature = "envoy-conversions"))] + pub fn new(opt: &Options) -> Result<Self> { + deserialize_yaml(&opt.config).map(|conf| conf.apply_options(opt)) + } +} + +pub fn deserialize_yaml<T: DeserializeOwned>(path: &Path) -> Result<T> { + let file = File::open(path)?; + serde_path_to_error::deserialize(serde_yaml::Deserializer::from_reader(&file)).map_err(crate::Error::from) +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + use std::path::Path; + + use super::{deserialize_yaml, Bootstrap, Config}; + use crate::{ + config::{log::Log, runtime::Runtime}, + options::Options, + Result, + }; + use orion_data_plane_api::decode::from_serde_deserializer; + pub use orion_data_plane_api::envoy_data_plane_api::envoy::config::bootstrap::v3::Bootstrap as EnvoyBootstrap; + use orion_error::ResultExtension; + use serde::Deserialize; + + #[derive(Deserialize)] + struct Wrapper(#[serde(deserialize_with = "orion_data_plane_api::decode::from_serde_deserializer")] EnvoyBootstrap); + + #[derive(Deserialize)] + struct ShimConfig { + #[serde(default)] + pub runtime: Runtime, + #[serde(default)] + pub logging: Log, + #[serde(default)] + pub bootstrap: Option<Bootstrap>, + + pub envoy_bootstrap: Option<Wrapper>, + } + + fn bootstrap_from_path_to_envoy_bootstrap(envoy_path: impl AsRef<Path>) -> Result<Bootstrap> { + (|| -> Result<_> { + let envoy_file = std::fs::File::open(&envoy_path).context("failed to open file")?; + let mut track = serde_path_to_error::Track::new(); + let envoy: EnvoyBootstrap = from_serde_deserializer(serde_path_to_error::Deserializer::new( + serde_yaml::Deserializer::from_reader(&envoy_file), + &mut track, + )) + .with_context(|| format!("failed to deserialize {}", track.path().to_string()))?; + Bootstrap::try_from(envoy).context("failed to convert into orion bootstrap") + })() + .with_context(|| format!("failed to read config from \"{}\"", envoy_path.as_ref().display())) + } + + impl Config { + pub fn new(opt: &Options) -> Result<Self> { + let config = match (&opt.config_files.config, &opt.config_files.bootstrap_override) { + (None, None) => return Err("no config file specified".into()), + (None, Some(envoy_path)) => { + let bootstrap =
bootstrap_from_path_to_envoy_bootstrap(envoy_path)?;
+                    Self { runtime: Runtime::default(), logging: Log::default(), bootstrap }
+                },
+                (Some(config), maybe_override) => {
+                    let ShimConfig { runtime, logging, bootstrap, envoy_bootstrap } = deserialize_yaml(&config)
+                        .with_context(|| format!("failed to deserialize \"{}\"", config.display()))?;
+                    let mut bootstrap = match (bootstrap, envoy_bootstrap) {
+                        (None, None) => Bootstrap::default(),
+                        (Some(b), None) => b,
+                        (None, Some(envoy)) => Bootstrap::try_from(envoy.0)
+                            .context("failed to convert envoy bootstrap to orion bootstrap")?,
+                        (Some(_), Some(_)) => {
+                            return Err("only one of `bootstrap` and `envoy_bootstrap` may be set".into())
+                        },
+                    };
+                    if let Some(bootstrap_override) = maybe_override {
+                        bootstrap = bootstrap_from_path_to_envoy_bootstrap(&bootstrap_override)?;
+                    }
+                    Self { runtime, logging, bootstrap }
+                },
+            };
+            Ok(config.apply_options(opt))
+        }
+    }
+    #[cfg(test)]
+    mod tests {
+        use crate::{config::Config, options::Options, Result};
+        use tracing_test::traced_test;
+        #[test]
+        #[traced_test]
+        fn roundtrip_configs() -> Result<()> {
+            let paths = std::fs::read_dir("../orion-proxy/conf")?;
+
+            for path in paths {
+                let path = path?.path();
+
+                if Some("yaml") == path.extension().map(|os| os.to_str().unwrap())
+                    && path.file_name().is_some_and(|os| {
+                        let as_str = os.to_str().unwrap();
+                        as_str.starts_with("orion-") || as_str.starts_with("envoy-")
+                    })
+                {
+                    tracing::info!("parsing {}", path.display());
+                    let new_conf = Config::new(&if path
+                        .file_name()
+                        .is_some_and(|os| os.to_str().unwrap().starts_with("orion-"))
+                    {
+                        Options::from_path(path.clone())
+                    } else {
+                        Options::from_path_to_envoy(path.clone())
+                    })?;
+                    let serialized = serde_yaml::to_string(&new_conf)?;
+                    tracing::info!("\n{serialized}\n");
+                    // if !path.ends_with("new.yaml") {
+                    //     let new_path = format!(
+                    //         "../orion-proxy/conf/{}-new.yaml",
+                    //         path.file_name()
+                    //             .unwrap()
+                    //             .to_str()
+                    //             .unwrap()
+                    //             .trim_end_matches(".yaml")
+                    //             .replace("envoy-", "orion-")
+                    //     );
+                    //     std::fs::write(new_path, serialized.as_bytes())?;
+                    // }
+                    let deserialized: Config = serde_yaml::from_str(&serialized)?;
+                    if new_conf != deserialized {
+                        tracing::info!("\n{}\n", serde_yaml::to_string(&deserialized)?);
+                        panic!("failed to roundtrip config transcoding")
+                    }
+                } else {
+                    tracing::info!("skipping {}", path.display())
+                }
+            }
+
+            Ok(())
+        }
+    }
+}
diff --git a/orion-configuration/src/config/README.md b/orion-configuration/src/config/README.md
new file mode 100644
index 00000000..32bcbb7d
--- /dev/null
+++ b/orion-configuration/src/config/README.md
@@ -0,0 +1,149 @@
+## Guiding principle
+
+We aim for NG to be compatible with a subset of Envoy in order to ease adoption. Ideally, NG could serve as a drop-in replacement for Envoy and should be able to use the same control planes.
+
+However, since we plan to only ever support a _subset_ of Envoy features, there will be times when NG cannot serve as a drop-in replacement. In order to make sure that our users can trust NG, we have to make them the following guarantee:
+
+> If NG accepts a configuration which was meant for Envoy, it will behave essentially like Envoy. If at any point we diverge from Envoy's behaviour, it will be documented and the reasoning will be explained. If NG cannot honour a config in the same way Envoy does, we will refuse to parse it (if loading from a file) or reject the update (if using xDS).
+
+Here, _"essentially like Envoy"_ means that we will follow the behaviour of Envoy at a high level.
+Filters will be executed in the expected order and requests will be routed or rejected as expected, but there might be minor differences in less crucial behaviours, such as the exposed statistics or the implementation details of load-balancing algorithms. Basically, it should be _safe_ to replace Envoy with NG.
+
+This crate is responsible for upholding this guarantee at the config parser level. It makes invalid Envoy configs unrepresentable in the code. `orion-lib` is responsible for implementing the correct behaviour on top of it.
+
+
+## Dev-guide
+
+### Destructuring
+When implementing a function to convert from an Envoy structure to an NG structure, always make sure to destructure the Envoy structure early on, like so:
+```rs
+fn try_from(envoy_struct: Envoy) -> Result<T, GenericError> {
+    let Envoy { name, enabled, filters, mode } = envoy_struct;
+    // ...
+}
+```
+
+That way, you know for sure that you are using all the fields of the struct, or you get an unused-variable warning.
+
+Fields which are not supported, and which should give an error if used, should be wrapped in the `unsupported_field!` macro, like so:
+```rs
+fn try_from(envoy_struct: Envoy) -> Result<T, GenericError> {
+    let Envoy { name, enabled, filters, mode } = envoy_struct;
+    unsupported_field!(enabled, filters)?;
+    // ...
+}
+```
+This macro takes a list of identifiers and will return an `Err(GenericError::UnsupportedField("$[field name]"))` if any of them are set in the config.
+
+Likewise, fields which are _required_ should be wrapped with `required!(field)` first. This macro will throw an error if a field was not filled out in the config. This applies to `Option`s, of course, but also to other structures such as `Vec` or `String`, as the YAML/Protobuf parser will populate these fields with their default (empty) values.
+
+For example:
+```rs
+fn try_from(envoy_struct: Envoy) -> Result<T, GenericError> {
+    let Envoy { name, enabled, filters, mode } = envoy_struct;
+    unsupported_field!(enabled, filters)?;
+    // if name == "" this will error out.
+    let name = required!(name)?;
+    // ...
+}
+```
+
+You should also apply this trick when parsing nested items, like structs inside of an enum:
+
+```rs
+fn try_from(envoy_struct: Envoy) -> Result<T, GenericError> {
+    let Envoy { name, enabled, filters, mode } = envoy_struct;
+    unsupported_field!(enabled, filters)?;
+    match mode {
+        EnvoyMode::ModeA(EnvoyModeA { is_flarb, total_blobs }) => {
+            unsupported_field!(total_blobs)?;
+            required!(is_flarb)?;
+            // ...
+        },
+        EnvoyMode::SimpleBool(x) => { /* x: bool */ },
+        EnvoyMode::WrappedBool(EnvoyBool { value }) => { /* ... */ },
+    }
+}
+```
+
+### Determining which fields are required or unsupported
+
+By default, you should mark _all_ fields as unsupported, and only selectively allow those fields which are handled by NG.
+The guiding principle should be that _any config_ that is accepted by NG should produce the same behaviour it does in Envoy.
+
+Some fields might have additional requirements that are not captured by the Envoy structure, such as a vector or option being non-empty, or a string matching a specific regex. You can find these requirements as annotations in the Protobuf files on [Envoy's GitHub](https://github.com/envoyproxy/envoy/tree/main/api/envoy).
+
+For example,
+```protobuf
+  // A port number that describes the destination port connecting to.
+  uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}];
+```
+says that the field `destination_port` is a `u32` which has to be less than or equal to 65535.
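+
+As a minimal sketch of how such an annotation can translate into conversion code (a hypothetical field, using the `GenericError` helpers described in the error-handling section below), the bound can be enforced by narrowing to a type that only represents valid values:
+```rs
+// `destination_port` arrives as a u32; the `lte: 65535` rule is exactly the
+// u16 range, so converting to u16 both enforces the bound and improves the type.
+let destination_port = u16::try_from(destination_port)
+    .map_err(|_| GenericError::from_msg(format!("\"{destination_port}\" is not a valid port number")))
+    .with_node("destination_port")?;
+```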
+
+Sometimes we might need to add more complicated checks to ensure we behave as Envoy does, such as making sure that a filter chain has at most one rate limiter and that it appears after any RBAC filters. It is crucial to include these checks, as accepting a config but behaving differently negatively affects user experience at best and could have serious security implications at worst.
+
+### Other utility macros
+
+`envoy.rs` contains a few utility macros to make all of this easier. We already saw `required!` and `unsupported_field!`, but it also contains macros for converting a vec of structs and optionally checking that it is non-empty (`convert_vec!`/`convert_non_empty_vec!`), or converting an option and making sure it is `Some` (`convert_opt!`).
+
+It is important to note that all these macros use the name of the first argument to generate their error string, so make sure the variable name matches the field in question.
+
+```rs
+let doesnt_matter = convert_opt!(filter)?; // will say the field "filter" is missing on an error
+let doesnt_matter = convert_opt!(some_option.map(some_func))?; // don't do this: the error won't name the field
+```
+
+### Error handling
+
+This crate heavily uses the error type `GenericError`. It is a small wrapper error that handles all the errors that can occur during parsing: an unsupported field was used, an unsupported enum variant was used, a required field is missing, or a different error occurred while parsing a field (e.g. an invalid port number was set).
+
+It builds a chain of the fields that are currently being parsed through the use of the `Result::with_node` and `GenericError::with_node` methods.
+When you enter a subfield of a struct, you should always add a `with_node()` to the end of the result, like so
+(**note:** the macros add these automatically):
+
+```rs
+let filter = Filter::try_from(filter).with_node("filter")?;
+let other_field = convert_opt!(other_field)?; // the macro automatically adds `with_node("other_field")`
+```
+If the struct you are parsing has a `name` field, you should also add it as a node as early as possible. Note that a plain block won't do, since `?` inside it would return from the whole function before `with_node(name)` is applied, so use a define-and-call closure:
+```rs
+fn try_from(envoy_struct: Envoy) -> Result<T, GenericError> {
+    let Envoy { name, enabled, filters, mode } = envoy_struct;
+    let name = required!(name)?;
+    (|| -> Result<_, GenericError> {
+        unsupported_field!(enabled, filters)?;
+        // ...
+        Ok(Self { /* ... */ })
+    })()
+    .with_node(name)
+}
```
+
+Sometimes, when doing a larger, more complicated conversion, you might find it hard to keep track of things or to satisfy the borrow checker without an early return or heavy use of clones. One trick you can then employ is to define-and-call a closure, like so:
+```rs
+(|| -> Result<_, GenericError> {
+    // we should destructure here actually
+    for (idx, item) in items.into_iter().enumerate() {
+        if let Err(e) = complicated_check(&item) {
+            // this now returns from the closure, not the parent function
+            return Err(GenericError::from_msg_with_cause(
+                item.name.unwrap_or_else(|| format!("[item {idx}]")),
+                e,
+            ));
+        }
+        // more logic
+    }
+    Ok(())
+})()
+.with_node("items")?;
+```
+
+When parsing a field value you might encounter a different kind of error. For example, the config might contain a port number that is outside of the valid range, or try to set a header name from an invalid string.
+
+In those cases you should use `GenericError::from_msg_with_cause` or `GenericError::from_msg`, where the `msg` should be a string saying which _value_ failed to parse as _what_, and the `cause` should be the underlying error, if any.
+
+For example:
+```rs
+let header_name = http::HeaderName::from_str(header_name)
+    .map_err(|e| {
+        GenericError::from_msg_with_cause(format!("Failed to parse \"{header_name}\" as HeaderName"), e)
+    })
+    .with_node("header_name")?;
+// for this one, we don't include the cause because the TryFromIntError does not tell us anything new,
+// whereas the header-name error might say which character was invalid, for example
+let port = u16::try_from(port)
+    .map_err(|_| GenericError::from_msg(format!("\"{port}\" is not a valid port number")))
+    .with_node("port")?;
+```
\ No newline at end of file
diff --git a/orion-configuration/src/config/bootstrap.rs b/orion-configuration/src/config/bootstrap.rs
new file mode 100644
index 00000000..5c869c1d
--- /dev/null
+++ b/orion-configuration/src/config/bootstrap.rs
@@ -0,0 +1,286 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use crate::config::{cluster::Cluster, common::is_default, listener::Listener, secret::Secret};
+use compact_str::CompactString;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
+pub struct Bootstrap {
+    #[serde(with = "serde_yaml::with::singleton_map_recursive", skip_serializing_if = "is_default", default)]
+    pub static_resources: StaticResources,
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    dynamic_resources: Option<DynamicResources>,
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    pub node: Option<Node>,
+}
+
+impl Bootstrap {
+    pub fn get_ads_configs(&self) -> &[CompactString] {
+        self.dynamic_resources.as_ref().map(|dr| dr.grpc_cluster_specifiers.as_slice()).unwrap_or_default()
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct Node {
+    pub id: CompactString,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct DynamicResources {
+    pub grpc_cluster_specifiers: Vec<CompactString>,
+}
+
+#[derive(Debug, Serialize, Deserialize, Default, PartialEq)]
+pub struct StaticResources {
+    #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")]
+    pub listeners: Vec<Listener>,
+    #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")]
+    pub clusters: Vec<Cluster>,
+    #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")]
+    pub secrets: Vec<Secret>,
+}
+
+#[cfg(feature = "envoy-conversions")]
+mod envoy_conversions {
+    #![allow(deprecated)]
+    use super::{Bootstrap, DynamicResources, Node, StaticResources};
+    use crate::config::common::*;
+    use compact_str::CompactString;
+    use orion_data_plane_api::envoy_data_plane_api::envoy::config::{
+        bootstrap::v3::{
+            bootstrap::{DynamicResources as EnvoyDynamicResources, StaticResources as EnvoyStaticResources},
+            Bootstrap as EnvoyBootstrap,
+        },
+        core::v3::{
+            grpc_service::{EnvoyGrpc, TargetSpecifier as
EnvoyGrpcTargetSpecifier}, + ApiConfigSource as EnvoyApiConfigSource, GrpcService as EnvoyGrpcService, Node as EnvoyNode, + }, + }; + + impl Bootstrap { + pub fn deserialize_from_envoy(rdr: R) -> Result { + let envoy: EnvoyBootstrap = + orion_data_plane_api::decode::from_serde_deserializer(serde_yaml::Deserializer::from_reader(rdr)) + .map_err(|e| GenericError::from_msg_with_cause("failed to deserialize envoy bootstrap", e))?; + envoy.try_into() + } + } + + impl TryFrom for Bootstrap { + type Error = GenericError; + fn try_from(envoy: EnvoyBootstrap) -> Result { + let EnvoyBootstrap { + node, + node_context_params, + static_resources, + dynamic_resources, + cluster_manager, + hds_config, + flags_path, + stats_sinks, + deferred_stat_options, + stats_config, + stats_flush_interval, + watchdog, + watchdogs, + tracing, + layered_runtime, + admin, + overload_manager, + enable_dispatcher_stats, + header_prefix, + stats_server_version_override, + use_tcp_for_dns_lookups, + dns_resolution_config, + typed_dns_resolver_config, + bootstrap_extensions, + fatal_actions, + config_sources, + default_config_source, + default_socket_interface, + certificate_provider_instances, + inline_headers, + perf_tracing_file_path, + default_regex_engine, + xds_delegate_extension, + xds_config_tracker_extension, + listener_manager, + application_log_config, + grpc_async_client_manager_config, + stats_flush, + memory_allocator_manager: _, + } = envoy; + unsupported_field!( + // node, + node_context_params, + // static_resources, + // dynamic_resources, + cluster_manager, + hds_config, + flags_path, + stats_sinks, + deferred_stat_options, + stats_config, + stats_flush_interval, + watchdog, + watchdogs, + tracing, + layered_runtime, + admin, + overload_manager, + enable_dispatcher_stats, + header_prefix, + stats_server_version_override, + use_tcp_for_dns_lookups, + dns_resolution_config, + typed_dns_resolver_config, + bootstrap_extensions, + fatal_actions, + config_sources, + default_config_source, + default_socket_interface, + certificate_provider_instances, + inline_headers, + perf_tracing_file_path, + default_regex_engine, + xds_delegate_extension, + xds_config_tracker_extension, + listener_manager, + application_log_config, + grpc_async_client_manager_config, + stats_flush + )?; + let static_resources = convert_opt!(static_resources)?; + let dynamic_resources = + dynamic_resources.map(DynamicResources::try_from).transpose().with_node("dynamic_resources")?; + let node = node.map(Node::try_from).transpose().with_node("node")?; + Ok(Self { static_resources, node, dynamic_resources }) + } + } + impl TryFrom for Node { + type Error = GenericError; + fn try_from(value: EnvoyNode) -> Result { + let EnvoyNode { + id, + cluster, + metadata, + dynamic_parameters, + locality, + user_agent_name, + extensions, + client_features, + listening_addresses, + user_agent_version_type, + } = value; + unsupported_field!( + // id, + cluster, + metadata, + dynamic_parameters, + locality, + user_agent_name, + extensions, + client_features, + listening_addresses, + user_agent_version_type + )?; + let id = required!(id)?.into(); + Ok(Self { id }) + } + } + impl TryFrom for DynamicResources { + type Error = GenericError; + fn try_from(value: EnvoyDynamicResources) -> Result { + let EnvoyDynamicResources { + lds_config, + lds_resources_locator, + cds_config, + cds_resources_locator, + ads_config, + } = value; + unsupported_field!(lds_config, lds_resources_locator, cds_config, cds_resources_locator)?; + let EnvoyApiConfigSource { + api_type, + 
transport_api_version, + cluster_names, + grpc_services, + refresh_delay, + request_timeout, + rate_limit_settings, + set_node_on_first_message_only, + config_validators, + } = required!(ads_config)?; + let grpc_cluster_specifiers = (|| -> Result<_, GenericError> { + unsupported_field!( + //todo(hayley): are these required to be set? + api_type, + transport_api_version, + cluster_names, + // grpc_services, + refresh_delay, + request_timeout, + rate_limit_settings, + set_node_on_first_message_only, + config_validators + )?; + (|| -> Result<_, GenericError> { + let mut cluster_specifiers = Vec::new(); + for EnvoyGrpcService { timeout, initial_metadata, target_specifier, retry_policy: _ } in + required!(grpc_services)? + { + unsupported_field!(timeout, initial_metadata)?; + match required!(target_specifier)? { + EnvoyGrpcTargetSpecifier::EnvoyGrpc(EnvoyGrpc { + cluster_name, + authority, + retry_policy, + max_receive_message_length: _, + skip_envoy_headers: _, + }) => { + unsupported_field!(authority, retry_policy).with_node("target_specifier")?; + let cluster_name = required!(cluster_name).with_node("target_specifier")?; + cluster_specifiers.push(CompactString::from(cluster_name)) + }, + EnvoyGrpcTargetSpecifier::GoogleGrpc(_) => { + return Err(GenericError::unsupported_variant("GoogleGrpc")) + .with_node("target_specifier") + }, + } + } + Ok(cluster_specifiers) + })() + .with_node("grpc_services") + })() + .with_node("ads_config")?; + Ok(DynamicResources { grpc_cluster_specifiers }) + } + } + impl TryFrom for StaticResources { + type Error = GenericError; + fn try_from(envoy: EnvoyStaticResources) -> Result { + let EnvoyStaticResources { listeners, clusters, secrets } = envoy; + let listeners = convert_vec!(listeners)?; + let secrets = convert_vec!(secrets)?; + let clusters = convert_vec!(clusters)?; + Ok(Self { listeners, clusters, secrets }) + } + } +} diff --git a/orion-configuration/src/config/cluster.rs b/orion-configuration/src/config/cluster.rs new file mode 100644 index 00000000..09485faf --- /dev/null +++ b/orion-configuration/src/config/cluster.rs @@ -0,0 +1,753 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// + +pub mod health_check; +pub use health_check::HealthCheck; +pub mod http_protocol_options; +pub use http_protocol_options::HttpProtocolOptions; +pub mod cluster_specifier; +pub use cluster_specifier::ClusterSpecifier; + +use super::{ + common::is_default, + secret::TlsCertificate, + transport::{BindDevice, CommonTlsValidationContext, TlsParameters}, +}; + +use compact_str::CompactString; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use std::{fmt::Display, net::SocketAddr, num::NonZeroU32, time::Duration}; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] +pub struct Cluster { + pub name: CompactString, + #[serde(flatten)] + pub discovery_settings: ClusterDiscoveryType, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub tls_config: Option, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub bind_device: Option, + #[serde(skip_serializing_if = "is_default", default)] + pub load_balancing_policy: LbPolicy, + #[serde(skip_serializing_if = "is_default", default)] + pub http_protocol_options: HttpProtocolOptions, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub health_check: Option, + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub connect_timeout: Option, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ClusterLoadAssignment { + #[serde( + serialize_with = "simplify_locality_lb_endpoints", + deserialize_with = "deser_through::" + )] + pub endpoints: Vec, +} + +fn simplify_locality_lb_endpoints( + value: &Vec, + serializer: S, +) -> Result { + if value.len() == 1 && value[0].priority == 0 { + simplify_lb_endpoints(&value[0].lb_endpoints, serializer) + } else { + value.serialize(serializer) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(untagged)] +enum LocalityLbEndpointsDeser { + LocalityLbEndpoints(Vec), + Simplified(LbEndpointVecDeser), +} + +impl From for Vec { + fn from(value: LocalityLbEndpointsDeser) -> Self { + match value { + LocalityLbEndpointsDeser::Simplified(simple) => { + vec![LocalityLbEndpoints { priority: 0, lb_endpoints: simple.into() }] + }, + LocalityLbEndpointsDeser::LocalityLbEndpoints(vec) => vec, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct LocalityLbEndpoints { + pub priority: u32, + #[serde(serialize_with = "simplify_lb_endpoints", deserialize_with = "deser_through::")] + pub lb_endpoints: Vec, +} + +fn simplify_lb_endpoints(value: &Vec, serializer: S) -> Result { + if value.iter().all(|s| is_default(&s.health_status) && s.load_balancing_weight == NonZeroU32::MIN) { + value.iter().map(|endpoint| endpoint.address).collect::>().serialize(serializer) + } else { + value.serialize(serializer) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(untagged)] +enum LbEndpointVecDeser { + LbEndpoints(Vec), + Address(Vec), +} + +impl From for Vec { + fn from(value: LbEndpointVecDeser) -> Self { + match value { + LbEndpointVecDeser::Address(address) => address + .into_iter() + .map(|address| LbEndpoint { + address, + health_status: HealthStatus::default(), + load_balancing_weight: NonZeroU32::MIN, + }) + .collect(), + LbEndpointVecDeser::LbEndpoints(vec) => vec, + } + } +} + +fn deser_through<'de, In: Deserialize<'de>, Out: From, D: Deserializer<'de>>( + deserializer: D, +) -> Result { + In::deserialize(deserializer).map(Out::from) +} + +#[derive(Clone, Debug, Serialize, 
Deserialize, PartialEq, Eq)] +pub struct LbEndpoint { + pub address: SocketAddr, + #[serde(skip_serializing_if = "is_default", default)] + pub health_status: HealthStatus, + pub load_balancing_weight: NonZeroU32, +} + +#[derive(Clone, Debug, Copy, PartialEq, Eq, Serialize, Deserialize, Default)] +pub enum HealthStatus { + #[default] + Healthy, + Unhealthy, +} + +impl Display for HealthStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + HealthStatus::Healthy => "Healthy", + HealthStatus::Unhealthy => "Unhealthy", + } + ) + } +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] +#[serde(tag = "discovery", content = "discovery_settings")] +pub enum ClusterDiscoveryType { + #[serde(rename = "static")] + Static(ClusterLoadAssignment), + #[serde(rename = "EDS")] + Eds, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct TlsConfig { + //todo(hayley): This field is not marked as required by envoy + // but sni is required in our client TLS stack. + // We could technically fall back to using the endpoint adress/name for the sni + // where no sni is configured here but that would require a major refactor. + // previous behaviour was to set sni to the empty string if missing. + pub sni: CompactString, + #[serde(skip_serializing_if = "is_default", default)] + pub parameters: TlsParameters, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default", flatten)] + pub secret: Option, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default", flatten)] + pub validation_context: Option, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum TlsSecret { + #[serde(rename = "tls_certificate_sds")] + SdsConfig(CompactString), + #[serde(rename = "tls_certificate")] + Certificate(TlsCertificate), +} + +#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, Default)] +#[serde(rename_all = "snake_case")] +pub enum LbPolicy { + #[default] + RoundRobin, + Random, + LeastRequest, + RingHash, + Maglev, +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::health_check::{ClusterHostnameError, HealthCheck, HealthCheckProtocol}; + use super::{ + Cluster, ClusterDiscoveryType, ClusterLoadAssignment, HealthStatus, HttpProtocolOptions, LbEndpoint, LbPolicy, + LocalityLbEndpoints, TlsConfig, TlsSecret, + }; + use crate::config::common::*; + use crate::config::{ + core::Address, + transport::{BindDevice, CommonTlsContext, Secrets, SupportedEnvoyTransportSocket}, + util::duration_from_envoy, + }; + use compact_str::CompactString; + use orion_data_plane_api::envoy_data_plane_api::{ + envoy::{ + config::{ + cluster::v3::{ + cluster::{ + ClusterDiscoveryType as EnvoyClusterDiscoveryType, DiscoveryType as EnvoyDiscoveryType, + LbConfig as EnvoyLbConfig, LbPolicy as EnvoyLbPolicy, + }, + Cluster as EnvoyCluster, + }, + core::v3::{ + BindConfig as EnvoyBindConfig, HealthStatus as EnvoyHealthStatus, + TransportSocket as EnvoyTransportSocket, + }, + endpoint::v3::{ + lb_endpoint::HostIdentifier as EnvoyHostIdentifier, + ClusterLoadAssignment as EnvoyClusterLoadAssignment, Endpoint as EnvoyEndpoint, + LbEndpoint as EnvoyLbEndpoint, LocalityLbEndpoints as EnvoyLocalityLbEndpoints, + }, + }, + extensions::transport_sockets::tls::v3::UpstreamTlsContext, + }, + google::protobuf::Any, + }; + use std::{collections::BTreeSet, net::SocketAddr, num::NonZeroU32}; + + 
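+    // Conversion from the Envoy cluster proto. Following the pattern from config/README.md:
+    // every field is destructured up front, so each one is either handled below or explicitly
+    // rejected via `unsupported_field!`.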
impl TryFrom for Cluster { + type Error = GenericError; + fn try_from(envoy: EnvoyCluster) -> Result { + let EnvoyCluster { + transport_socket_matches, + name, + alt_stat_name, + eds_cluster_config, + connect_timeout, + per_connection_buffer_limit_bytes, + lb_policy, + load_assignment, + health_checks, + max_requests_per_connection, + circuit_breakers, + upstream_http_protocol_options, + common_http_protocol_options, + http_protocol_options, + http2_protocol_options, + typed_extension_protocol_options, + dns_refresh_rate, + dns_failure_refresh_rate, + respect_dns_ttl, + dns_lookup_family, + dns_resolvers, + use_tcp_for_dns_lookups, + dns_resolution_config, + typed_dns_resolver_config, + wait_for_warm_on_init, + outlier_detection, + cleanup_interval, + upstream_bind_config, + lb_subset_config, + common_lb_config, + transport_socket, + metadata, + protocol_selection, + upstream_connection_options, + close_connections_on_host_health_failure, + ignore_health_on_host_removal, + filters, + load_balancing_policy, + lrs_server, + track_timeout_budgets, + upstream_config, + track_cluster_stats, + preconnect_policy, + connection_pool_per_downstream_connection, + cluster_discovery_type, + lb_config, + dns_jitter: _, + lrs_report_endpoint_metrics: _, + } = envoy; + let name = required!(name)?; + (|| -> Result { + unsupported_field!( + transport_socket_matches, + // name, + alt_stat_name, + eds_cluster_config, + // connect_timeout, + per_connection_buffer_limit_bytes, + // lb_policy, + // load_assignment, + // health_checks, + max_requests_per_connection, + circuit_breakers, + upstream_http_protocol_options, + common_http_protocol_options, + http_protocol_options, + http2_protocol_options, + // typed_extension_protocol_options, + dns_refresh_rate, + dns_failure_refresh_rate, + respect_dns_ttl, + dns_lookup_family, + dns_resolvers, + use_tcp_for_dns_lookups, + dns_resolution_config, + typed_dns_resolver_config, + wait_for_warm_on_init, + outlier_detection, + cleanup_interval, + // upstream_bind_config, + lb_subset_config, + common_lb_config, + // transport_socket, + metadata, + protocol_selection, + upstream_connection_options, + close_connections_on_host_health_failure, + ignore_health_on_host_removal, + filters, + load_balancing_policy, + lrs_server, + track_timeout_budgets, + upstream_config, + track_cluster_stats, + preconnect_policy, + connection_pool_per_downstream_connection + // cluster_discovery_type, + // lb_config + )?; + + if let Some(lb_config_type) = &lb_config { + // `lb_config` is a synthetic enum created when parsing the configuration, + // we can't report it as the actual offending field + let err = match lb_config_type { + EnvoyLbConfig::RingHashLbConfig(_) => GenericError::UnsupportedField("ring_hash_lb_config"), + EnvoyLbConfig::MaglevLbConfig(_) => GenericError::UnsupportedField("maglev_lb_config"), + EnvoyLbConfig::OriginalDstLbConfig(_) => { + GenericError::UnsupportedField("original_dst_lb_config") + }, + EnvoyLbConfig::LeastRequestLbConfig(_) => { + GenericError::UnsupportedField("least_request_lb_config") + }, + EnvoyLbConfig::RoundRobinLbConfig(_) => GenericError::UnsupportedField("round_robin_lb_config"), + }; + Err(err)? 
+ } + + let name = CompactString::from(&name); + // let cluster_discovery_type = convert_opt!(cluster_discovery_type)?; + let discovery_settings = ( + required!(cluster_discovery_type)?, + load_assignment.map(ClusterLoadAssignment::try_from).transpose().with_node("load_assignment")?, + ) + .try_into() + .with_node("cluster_discovery_type")?; + //fixme(hayley): the envoy protobuf documentation says: + // > If the address and port are empty, no bind will be performed. + // but its unclear what adress this is refering to. For now we will always bind. + let bind_device = upstream_bind_config + .map(bind_device_from_bind_config) + .transpose() + .with_node("upstream_bind_config")? + .flatten(); + let tls_config = transport_socket.map(TlsConfig::try_from).transpose().with_node("transport_socket")?; + let load_balancing_policy = lb_policy.try_into().with_node("lb_policy")?; + let http_protocol_options = typed_extension_protocol_options + .into_values() + .map(HttpProtocolOptions::try_from) + .collect::, GenericError>>() + .with_node("typed_extension_protocol_options")?; + if http_protocol_options.len() > 1 { + return Err(GenericError::from_msg( + "Only one set of http protocol options can be specified per upstream", + )) + .with_node("typed_extension_protocol_options"); + } + let http_protocol_options = http_protocol_options.into_iter().next().unwrap_or_default(); + if health_checks.len() > 1 { + return Err(GenericError::from_msg("only one healthcheck per cluster is supported") + .with_node("health_check")); + } + let health_check = health_checks + .into_iter() + .next() + .map(HealthCheck::try_from) + .transpose() + .with_index(0) + .with_node("health_checks")?; + + // These are soft validations related to the health checkers that are hard to encode in the type system, + // so we'll try to detect as many of them here and fail now. These validations are done again in the + // actual health checking code, but since we validated the data here, they should always come clean. 
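+            // Concretely, the checks below are: an HTTP health check needs a usable host header
+            // and an HTTP version matching the cluster codec; a gRPC health check requires HTTP/2;
+            // a TCP health check needs no extra validation here.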
+ if let Some(health_check_value) = &health_check { + match &health_check_value.protocol { + HealthCheckProtocol::Http(http_check) => { + // Validate the host name for the HTTP request + match http_check.host(&name) { + Ok(_) => (), + Err(err @ ClusterHostnameError) => { + return Err(GenericError::from_msg_with_cause( + "tried to use the cluster name as the HTTP health check host name (since http_health_check.host was not specified) but failed", + err, + ) + .with_node("name")) + }, + } + + // Validate the HTTP version of the health checker is supported by the HTTP options + if http_check.http_version != http_protocol_options.codec { + return Err(GenericError::from_msg( + "health check and cluster HTTP versions don't match", + ) + .with_node("codec_client_type") + .with_node("http_health_check") + .with_index(0) + .with_node("health_checks")); + } + }, + HealthCheckProtocol::Grpc(_) => { + if !http_protocol_options.codec.is_http2() { + return Err(GenericError::from_msg("gRPC health checker requires HTTP 2") + .with_node("grpc_health_check") + .with_index(0) + .with_node("health_checks")); + } + }, + HealthCheckProtocol::Tcp(_) => (), + } + } + + let connect_timeout = connect_timeout + .map(duration_from_envoy) + .transpose() + .map_err(|_| GenericError::from_msg("Failed to convert connect_timeout into Duration")) + .with_node("connect_timeout")?; + Ok(Self { + name, + discovery_settings, + bind_device, + tls_config, + load_balancing_policy, + http_protocol_options, + health_check, + connect_timeout, + }) + })() + .with_name(name) + } + } + + impl TryFrom for ClusterLoadAssignment { + type Error = GenericError; + fn try_from(value: EnvoyClusterLoadAssignment) -> Result { + let EnvoyClusterLoadAssignment { cluster_name, endpoints, named_endpoints, policy } = value; + unsupported_field!(named_endpoints, policy)?; + let ret = (|| -> Result<_, _> { + let endpoints: Vec = convert_non_empty_vec!(endpoints)?; + if !endpoints.is_empty() { + let set_of_priorities = endpoints.iter().map(|e| e.priority).collect::>(); + let n_entries = set_of_priorities.len(); + let first = set_of_priorities.first().copied().unwrap_or_default(); + let last = set_of_priorities.last().copied().unwrap_or_default() as usize; + if (first, last) != (0, n_entries - 1) { + return Err(GenericError::from_msg( + "Priorities should range from 0 (highest) to N (lowest) without skipping.", + )) + .with_node("endpoints"); + } + } + Ok(Self { endpoints }) + })(); + if !cluster_name.is_empty() { + ret.with_name(cluster_name) + } else { + ret + } + } + } + + impl TryFrom for LocalityLbEndpoints { + type Error = GenericError; + fn try_from(value: EnvoyLocalityLbEndpoints) -> Result { + let EnvoyLocalityLbEndpoints { + locality, + lb_endpoints, + load_balancing_weight, + priority, + proximity, + lb_config, + metadata: _, + } = value; + unsupported_field!(locality, load_balancing_weight, proximity, lb_config)?; + let lb_endpoints: Vec = convert_non_empty_vec!(lb_endpoints)?; + let mut sum = 0u32; + for lb_endpoint in &lb_endpoints { + sum = if let Some(x) = sum.checked_add(lb_endpoint.load_balancing_weight.into()) { + x + } else { + return Err(GenericError::from_msg("Sum of weights has to be less than 4_294_967_295")) + .with_node("lb_endpoints"); + } + } + Ok(Self { lb_endpoints, priority }) + } + } + + impl From for HealthStatus { + fn from(value: EnvoyHealthStatus) -> Self { + match value { + EnvoyHealthStatus::Healthy | EnvoyHealthStatus::Unknown => HealthStatus::Healthy, + _ => HealthStatus::Unhealthy, + } + } + } + + impl TryFrom for 
HealthStatus { + type Error = GenericError; + fn try_from(value: i32) -> Result { + EnvoyHealthStatus::from_i32(value) + .ok_or_else(|| GenericError::from_msg(format!("[unknown HealthStatus {value}]"))) + .map(Self::from) + } + } + + impl TryFrom for LbEndpoint { + type Error = GenericError; + fn try_from(value: EnvoyLbEndpoint) -> Result { + let EnvoyLbEndpoint { health_status, metadata, load_balancing_weight, host_identifier } = value; + unsupported_field!(metadata)?; + let address = match required!(host_identifier)? { + EnvoyHostIdentifier::Endpoint(EnvoyEndpoint { + address, + health_check_config, + hostname, + additional_addresses, + }) => (|| -> Result { + unsupported_field!(health_check_config, hostname, additional_addresses)?; + let address: Address = convert_opt!(address)?; + Ok(address.into_socket_addr()) + })(), + EnvoyHostIdentifier::EndpointName(_) => Err(GenericError::unsupported_variant("EndpointName")), + } + .with_node("host")?; + let load_balancing_weight = load_balancing_weight.map(|v| v.value).unwrap_or(1); + let load_balancing_weight = NonZeroU32::try_from(load_balancing_weight) + .map_err(|_| GenericError::from_msg("load_balancing_weight can't be zero")) + .with_node("load_balancing_weight")?; + let health_status = health_status.try_into().with_node("health_status")?; + Ok(Self { address, health_status, load_balancing_weight }) + } + } + + impl TryFrom<(EnvoyClusterDiscoveryType, Option)> for ClusterDiscoveryType { + type Error = GenericError; + fn try_from( + (discovery, cla): (EnvoyClusterDiscoveryType, Option), + ) -> Result { + match discovery { + EnvoyClusterDiscoveryType::ClusterType(_) => Err(GenericError::unsupported_variant("ClusterType")), + EnvoyClusterDiscoveryType::Type(x) => (x, cla).try_into(), + } + } + } + + impl TryFrom<(i32, Option)> for ClusterDiscoveryType { + type Error = GenericError; + fn try_from((discovery, cla): (i32, Option)) -> Result { + let discovery = EnvoyDiscoveryType::from_i32(discovery) + .ok_or_else(|| GenericError::unsupported_variant(format!("[unknown DiscoveryType {discovery}]")))?; + (discovery, cla).try_into() + } + } + + impl TryFrom<(EnvoyDiscoveryType, Option)> for ClusterDiscoveryType { + type Error = GenericError; + fn try_from( + (discovery, cla): (EnvoyDiscoveryType, Option), + ) -> Result { + match (discovery, cla) { + (EnvoyDiscoveryType::Static, Some(cla)) => Ok(Self::Static(cla)), + (EnvoyDiscoveryType::Static, None) => Err(GenericError::from_msg( + "Static clusters are required to have a cluster load assignment configured", + )), + (EnvoyDiscoveryType::Eds, None) => Ok(Self::Eds), + (EnvoyDiscoveryType::Eds, Some(_)) => { + Err(GenericError::from_msg("EDS clusters can't have a static cluster load assignment configured")) + }, + (EnvoyDiscoveryType::LogicalDns, _) => Err(GenericError::unsupported_variant("LogicalDns")), + (EnvoyDiscoveryType::StrictDns, _) => Err(GenericError::unsupported_variant("StrictDns")), + (EnvoyDiscoveryType::OriginalDst, _) => Err(GenericError::unsupported_variant("OriginalDst")), + } + } + } + + //todo(hayley): refactor this to a trait impl when splitting the envoy conversions out of this crate + fn bind_device_from_bind_config(value: EnvoyBindConfig) -> Result, GenericError> { + let EnvoyBindConfig { + source_address, + freebind, + socket_options, + extra_source_addresses, + additional_source_addresses, + local_address_selector, + } = value; + unsupported_field!( + source_address, + freebind, + // socket_options, + extra_source_addresses, + additional_source_addresses, + 
local_address_selector + )?; + let bind_device = convert_vec!(socket_options)?; + if bind_device.len() > 1 { + return Err(GenericError::from_msg("at most one bind device is supported")).with_node("socket_options"); + } + Ok(bind_device.into_iter().next()) + } + + impl TryFrom for TlsConfig { + type Error = GenericError; + fn try_from(envoy: Any) -> Result { + SupportedEnvoyTransportSocket::try_from(envoy)?.try_into() + } + } + + impl TryFrom for TlsConfig { + type Error = GenericError; + fn try_from(envoy: EnvoyTransportSocket) -> Result { + let EnvoyTransportSocket { name, config_type } = envoy; + // the envoy docs say that name has to be envoy.transport_sockets.tls or tls (deprecated) + // but it doesn't actually have to be, it just works with any string but it _is_ required to be + // non-empty. + // so in order to maximize compat with Envoys actual behaviour we check that it's not empty and leave it at that + let name = required!(name)?; + (|| -> Result<_, GenericError> { + match required!(config_type)? { + orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::transport_socket::ConfigType::TypedConfig(any) => { + Self::try_from(any) + } + } + })().with_node("config_type").with_name(name) + } + } + + impl TryFrom for TlsConfig { + type Error = GenericError; + fn try_from(value: SupportedEnvoyTransportSocket) -> Result { + match value { + SupportedEnvoyTransportSocket::DownstreamTlsContext(_) => { + Err(GenericError::unsupported_variant("DownstreamTlsContext")) + }, + SupportedEnvoyTransportSocket::UpstreamTlsContext(x) => x.try_into(), + } + } + } + + impl TryFrom for TlsConfig { + type Error = GenericError; + fn try_from(value: UpstreamTlsContext) -> Result { + let UpstreamTlsContext { + common_tls_context, + sni, + allow_renegotiation, + max_session_keys, + enforce_rsa_key_usage, + auto_host_sni: _, + auto_sni_san_validation: _, + } = value; + unsupported_field!( + // common_tls_context, + // sni, + allow_renegotiation, + max_session_keys, + enforce_rsa_key_usage + )?; + let CommonTlsContext { parameters, secrets, validation_context } = convert_opt!(common_tls_context)?; + let secret = match secrets { + Secrets::Certificates(certs) => { + if certs.len() > 1 { + Err(GenericError::from_msg("at most one certificate is supported for upstream tls context")) + } else { + Ok(certs.into_iter().next().map(TlsSecret::Certificate)) + } + }, + Secrets::SdsConfig(sds) => { + if sds.len() > 1 { + Err(GenericError::from_msg("at most one certificate is supported for upstream tls context")) + } else { + Ok(sds.into_iter().next().map(TlsSecret::SdsConfig)) + } + }, + } + .with_node("common_tls_context") + .with_node("secrets")?; + let sni = required!(sni)?.into(); + Ok(Self { sni, parameters, secret, validation_context }) + } + } + + impl TryFrom for LbPolicy { + type Error = GenericError; + fn try_from(value: EnvoyLbPolicy) -> Result { + Ok(match value { + EnvoyLbPolicy::RoundRobin => Self::RoundRobin, + EnvoyLbPolicy::Random => Self::Random, + EnvoyLbPolicy::LeastRequest => Self::LeastRequest, + EnvoyLbPolicy::RingHash => Self::RingHash, + EnvoyLbPolicy::Maglev => Self::Maglev, + EnvoyLbPolicy::ClusterProvided => return Err(GenericError::unsupported_variant("ClusterProvided")), + EnvoyLbPolicy::LoadBalancingPolicyConfig => { + return Err(GenericError::unsupported_variant("LoadBalancingPolicyConfig")) + }, + }) + } + } + + impl TryFrom for LbPolicy { + type Error = GenericError; + fn try_from(value: i32) -> Result { + EnvoyLbPolicy::from_i32(value) + .ok_or_else(|| 
GenericError::unsupported_variant(format!("[unknown LbPolicy {value}]")))? + .try_into() + } + } +} diff --git a/orion-configuration/src/config/cluster/cluster_specifier.rs b/orion-configuration/src/config/cluster/cluster_specifier.rs new file mode 100644 index 00000000..569ec3ef --- /dev/null +++ b/orion-configuration/src/config/cluster/cluster_specifier.rs @@ -0,0 +1,174 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use compact_str::CompactString; +use serde::{Deserialize, Serialize}; +use std::num::NonZeroU32; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(untagged)] +pub enum ClusterSpecifier { + Cluster(CompactString), + WeightedCluster(Vec), +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct WeightedClusterSpecifier { + pub cluster: CompactString, + pub weight: NonZeroU32, +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{ClusterSpecifier, WeightedClusterSpecifier}; + use crate::config::common::*; + use compact_str::CompactString; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::route::v3::{ + route_action::ClusterSpecifier as EnvoyClusterSpecifier, + weighted_cluster::ClusterWeight as EnvoyClusterWeight, WeightedCluster as EnvoyWeightedCluster, + }, + extensions::filters::network::tcp_proxy::v3::tcp_proxy::{ + weighted_cluster::ClusterWeight as EnvoyTcpClusterWeight, ClusterSpecifier as EnvoyTcpClusterSpecifier, + WeightedCluster as EnvoyTcpWeightedCluster, + }, + }; + + impl TryFrom for ClusterSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyWeightedCluster) -> Result { + let EnvoyWeightedCluster { clusters, total_weight, runtime_key_prefix, random_value_specifier } = value; + unsupported_field!(total_weight, runtime_key_prefix, random_value_specifier)?; + let clusters: Vec = convert_non_empty_vec!(clusters)?; + let mut sum = 0u32; + for cluster in &clusters { + sum = sum.checked_add(cluster.weight.into()).ok_or( + GenericError::from_msg("sum of cluster weights must not exceed 4_294_967_295") + .with_node("clusters"), + )? 
+ } + if sum == 0 { + return Err(GenericError::from_msg("sum of cluster weights must be > 0")); + } + Ok(Self::WeightedCluster(clusters)) + } + } + + impl TryFrom for ClusterSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyClusterSpecifier) -> Result { + match value { + EnvoyClusterSpecifier::Cluster(cluster) => { + required!(cluster).map(CompactString::from).map(Self::Cluster) + }, + EnvoyClusterSpecifier::WeightedClusters(envoy) => envoy.try_into(), + EnvoyClusterSpecifier::ClusterHeader(_) => Err(GenericError::unsupported_variant("ClusterHeader")), + EnvoyClusterSpecifier::ClusterSpecifierPlugin(_) => { + Err(GenericError::unsupported_variant("ClusterSpecifierPlugin")) + }, + EnvoyClusterSpecifier::InlineClusterSpecifierPlugin(_) => { + Err(GenericError::unsupported_variant("InlineClusterSpecifierPlugin")) + }, + } + } + } + + impl TryFrom for WeightedClusterSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyClusterWeight) -> Result { + let EnvoyClusterWeight { + name, + cluster_header, + weight, + metadata_match, + request_headers_to_add, + request_headers_to_remove, + response_headers_to_add, + response_headers_to_remove, + typed_per_filter_config, + host_rewrite_specifier, + } = value; + unsupported_field!( + // name, + cluster_header, + // weight, + metadata_match, + request_headers_to_add, + request_headers_to_remove, + response_headers_to_add, + response_headers_to_remove, + typed_per_filter_config, + host_rewrite_specifier + )?; + let cluster: CompactString = required!(name)?.into(); + (|| -> Result<_, GenericError> { + // we could allow for default = 1 if missing in ng to allow equaly balanced clusters with shorthand notation + let weight = weight.map(|x| x.value).ok_or(GenericError::MissingField("weight"))?; + let weight = weight + .try_into() + .map_err(|_| GenericError::from_msg("clusterweight has to be > 0")) + .with_node("weight")?; + Ok(Self { cluster: cluster.clone(), weight }) + })() + .with_name(cluster) + } + } + + impl TryFrom for ClusterSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyTcpClusterSpecifier) -> Result { + match value { + EnvoyTcpClusterSpecifier::Cluster(cluster) => Ok(Self::Cluster(cluster.into())), + EnvoyTcpClusterSpecifier::WeightedClusters(wc) => wc.try_into(), + } + } + } + + impl TryFrom for ClusterSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyTcpWeightedCluster) -> Result { + let EnvoyTcpWeightedCluster { clusters } = value; + let clusters: Vec = convert_non_empty_vec!(clusters)?; + let mut sum = 0u32; + for cluster in &clusters { + sum = sum.checked_add(cluster.weight.into()).ok_or( + GenericError::from_msg("sum of cluster weights must not exceed 4_294_967_295") + .with_node("clusters"), + )?; + } + Ok(Self::WeightedCluster(clusters)) + } + } + + impl TryFrom for WeightedClusterSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyTcpClusterWeight) -> Result { + let EnvoyTcpClusterWeight { name, weight, metadata_match } = value; + unsupported_field!(metadata_match)?; + let cluster = required!(name)?.into(); + let weight = weight + .try_into() + .map_err(|_| GenericError::from_msg("clusterweight has to be > 0")) + .with_node("weight")?; + Ok(Self { cluster, weight }) + } + } +} diff --git a/orion-configuration/src/config/cluster/health_check.rs b/orion-configuration/src/config/cluster/health_check.rs new file mode 100644 index 00000000..898f5820 --- /dev/null +++ b/orion-configuration/src/config/cluster/health_check.rs @@ -0,0 +1,622 @@ +// 
SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use crate::config::common::is_default; +use compact_str::CompactString; +use http::{ + uri::{Authority, PathAndQuery}, + Method, +}; +use serde::{ser::SerializeStruct, Deserialize, Serialize}; +use std::{ops::Range, str::FromStr, time::Duration}; + +pub use super::http_protocol_options::Codec; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] +pub struct ClusterHealthCheck { + /// Timeout to wait for a health check response. + #[serde(with = "humantime_serde")] + pub timeout: Duration, + /// The interval between health checks. + #[serde(with = "humantime_serde")] + pub interval: Duration, + /// The number of unhealthy health checks required before a host is marked unhealthy. + pub unhealthy_threshold: u16, + /// The number of healthy health checks required before a host is marked healthy. + pub healthy_threshold: u16, + /// Reuse health check connection between health checks. Default is `true`. + // pub reuse_connection: bool, + /// If specified, Envoy will start health checking after for a random time between 0 and `initial_jitter`. + /// This only applies to the first health check. + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub initial_jitter: Option, + /// If specified, during every interval Envoy will add a random time between 0 and `interval_jitter` to the wait time. + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub interval_jitter: Option, + /// An optional jitter amount from 0.0 to 1.0. + /// If specified, during every interval we will add `interval` * `interval_jitter_percent` to the wait time. + /// If `interval_jitter` and `interval_jitter_percent` are both set, both of them will be used to increase the wait time. + #[serde(skip_serializing_if = "f32_is_zero", default = "Default::default")] + pub interval_jitter_percent: f32, + /// The “unhealthy interval” is a health check interval that is used for hosts that are marked as unhealthy. + /// As soon as the host is marked as healthy, Envoy will shift back to using the standard health check + /// interval that is defined. + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub unhealthy_interval: Option, + /// The “unhealthy edge interval” is a special health check interval that is used for the first health check + /// right after a host is marked as unhealthy. For subsequent health checks Envoy will shift back to using either + /// “unhealthy interval” if present or the standard health check interval that is defined. + /// The default value for “unhealthy edge interval” is the same as “unhealthy interval”. 
+ #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub unhealthy_edge_interval: Option, + /// The “healthy edge interval” is a special health check interval that is used for the first health check + /// right after a host is marked as healthy. For subsequent health checks Envoy will shift back to using + /// the standard health check interval that is defined. + /// The default value for “healthy edge interval” is the same as the default interval. + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub healthy_edge_interval: Option, +} + +fn f32_is_zero(x: &f32) -> bool { + *x == 0.0 +} + +impl ClusterHealthCheck { + pub fn new(timeout: Duration, interval: Duration, unhealthy_threshold: u16, healthy_threshold: u16) -> Self { + ClusterHealthCheck { + timeout, + interval, + unhealthy_threshold, + healthy_threshold, + initial_jitter: None, + interval_jitter: None, + interval_jitter_percent: 0.0, + unhealthy_interval: None, + unhealthy_edge_interval: None, + healthy_edge_interval: None, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum HealthCheckProtocol { + Http(HttpHealthCheck), + Tcp(TcpHealthCheck), + Grpc(GrpcHealthCheck), +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +pub struct HealthCheck { + #[serde(flatten)] + pub cluster: ClusterHealthCheck, + #[serde(flatten)] + pub protocol: HealthCheckProtocol, +} + +impl<'de> Deserialize<'de> for HealthCheckProtocol { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(tag = "protocol", content = "protocol_settings", rename_all = "UPPERCASE")] + enum ProtocolSerde { + Http(Option), + Tcp(Option), + Grpc(Option), + } + + ProtocolSerde::deserialize(deserializer).map(|protocol| match protocol { + ProtocolSerde::Http(http) => HealthCheckProtocol::Http(http.unwrap_or_default()), + ProtocolSerde::Tcp(tcp) => HealthCheckProtocol::Tcp(tcp.unwrap_or_default()), + ProtocolSerde::Grpc(grpc) => HealthCheckProtocol::Grpc(grpc.unwrap_or_default()), + }) + } +} + +impl Serialize for HealthCheckProtocol { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + const TAG: &str = "protocol"; + const CONTENT: &str = "protocol_settings"; + let mut s = serializer.serialize_struct("Protocol", 2)?; + match self { + Self::Http(_) => s.serialize_field(TAG, "HTTP")?, + Self::Tcp(_) => s.serialize_field(TAG, "TCP")?, + Self::Grpc(_) => s.serialize_field(TAG, "GRPC")?, + } + match self { + Self::Http(http) => { + if is_default(http) { + s.skip_field(CONTENT)? + } else { + s.serialize_field(CONTENT, http)? + } + }, + Self::Tcp(tcp) => { + if is_default(tcp) { + s.skip_field(CONTENT)? + } else { + s.serialize_field(CONTENT, tcp)? + } + }, + Self::Grpc(grpc) => { + if is_default(grpc) { + s.skip_field(CONTENT)? + } else { + s.serialize_field(CONTENT, grpc)? 
+ } + }, + } + s.end() + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct HttpHealthCheck { + #[serde(skip_serializing_if = "is_default", default)] + pub http_version: Codec, + #[serde(with = "http_serde_ext::method", skip_serializing_if = "is_default", default)] + pub method: Method, + #[serde(with = "http_serde_ext::authority::option", skip_serializing_if = "is_default", default)] + pub host: Option, + //todo(hayley): should be Range perhaps but would require some custom serde shenanigans + // see for example https://stackoverflow.com/a/72484080 + // n.b. for the inner StatusCode we can use http_serde_ext + // since Vec is a very specific type we might want to fully wrap it in another type even that's essentially + // a HashSet + #[serde(skip_serializing_if = "is_default_expected_statuses", default = "default_expected_statuses")] + pub expected_statuses: Vec>, + #[serde(skip_serializing_if = "is_default", default)] + pub retriable_statuses: Vec>, + #[serde(with = "http_serde_ext::path_and_query::option", skip_serializing_if = "is_default", default)] + pub path: Option, +} + +fn default_expected_statuses() -> Vec> { + vec![200..201] +} + +fn is_default_expected_statuses(value: &Vec>) -> bool { + *value == default_expected_statuses() +} + +impl Default for HttpHealthCheck { + fn default() -> Self { + Self { + http_version: Codec::Http1, + host: None, + method: Method::GET, + expected_statuses: default_expected_statuses(), + retriable_statuses: Vec::new(), + path: None, + } + } +} + +#[derive(thiserror::Error, Debug)] +#[error("Cluster name is not a valid host name")] +pub struct ClusterHostnameError; + +impl HttpHealthCheck { + pub fn host(&self, cluster_name: &str) -> Result { + // todo(hayley): validate that this order is correct. + // looking at the envoy docs for the http health check it says the following about the host field + // > The value of the host header in the HTTP health check request. If + // > left empty (default value), the name of the cluster this health check is associated + // > with will be used. The host header can be customized for a specific endpoint by setting the + // > :ref:`hostname ` field. + // which could imply it's 2->1->3 instead + + // The `host` field of the HTTP request comes from: + // 1. The HttpHealthCheck.host field, or + // 2. The HealthCheckConfig.hostname field, or + //NOTE(hayley): this^^^ is not implemented + // 3. The cluster's name. 
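+        // Note that the fallback to the cluster name can fail: cluster names are arbitrary
+        // strings and may not parse as a valid `Authority`, which is exactly what
+        // `ClusterHostnameError` reports.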
+ if let Some(host) = &self.host { + //ideally all of the options here would be a headervalue and we wouldn't need to error out + Ok(host.to_owned()) + } else { + Authority::from_str(cluster_name).map_err(|_| ClusterHostnameError) + } + } +} + +#[derive(Debug, Clone, Default, Deserialize, Serialize, PartialEq, Eq)] +pub struct TcpHealthCheck { + pub send: Option>, + pub receive: Vec>, +} + +#[derive(Debug, Clone, Default, Deserialize, Serialize, PartialEq, Eq)] +pub struct GrpcHealthCheck { + pub service_name: CompactString, +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{ + default_expected_statuses, Codec, GrpcHealthCheck, HealthCheck, HealthCheckProtocol, HttpHealthCheck, + TcpHealthCheck, + }; + use crate::config::{common::*, util::duration_from_envoy}; + use http::{ + uri::{Authority, PathAndQuery}, + Method, + }; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::core::v3::{ + health_check::{ + payload::Payload as EnvoyPayload, GrpcHealthCheck as EnvoyGrpcHealthCheck, + HealthChecker as EnvoyHealthChecker, HttpHealthCheck as EnvoyHttpHealthCheck, + Payload as EnvoyPayloadOption, TcpHealthCheck as EnvoyTcpHealthCheck, + }, + HealthCheck as EnvoyHealthCheck, RequestMethod, + }, + r#type::v3::{CodecClientType, Int64Range}, + }; + use std::{ops::Range, str::FromStr}; + + impl TryFrom for HealthCheck { + type Error = GenericError; + fn try_from(value: EnvoyHealthCheck) -> Result { + let EnvoyHealthCheck { + timeout, + interval, + initial_jitter, + interval_jitter, + interval_jitter_percent, + unhealthy_threshold, + healthy_threshold, + alt_port, + reuse_connection, + no_traffic_interval, + no_traffic_healthy_interval, + unhealthy_interval, + unhealthy_edge_interval, + healthy_edge_interval, + event_log_path, + event_logger, + event_service, + always_log_health_check_failures, + tls_options, + transport_socket_match_criteria, + health_checker, + always_log_health_check_success, + } = value; + unsupported_field!( + // timeout, + // interval, + // initial_jitter, + // interval_jitter, + // interval_jitter_percent, + // unhealthy_threshold, + // healthy_threshold, + alt_port, + reuse_connection, + no_traffic_interval, + no_traffic_healthy_interval, + // unhealthy_interval, + // unhealthy_edge_interval, + // healthy_edge_interval, + event_log_path, + event_logger, + event_service, + always_log_health_check_failures, + tls_options, + transport_socket_match_criteria, // health_checker + always_log_health_check_success + )?; + let timeout = duration_from_envoy(required!(timeout)?).map_err(|e| { + GenericError::from_msg_with_cause("failed to convert {timeout} to std::time::Duration", e) + .with_node("timeout") + })?; + let interval = duration_from_envoy(required!(interval)?).map_err(|e| { + GenericError::from_msg_with_cause("failed to convert {interval} to std::time::Duration", e) + .with_node("interval") + })?; + let initial_jitter = initial_jitter.map(duration_from_envoy).transpose().map_err(|e| { + GenericError::from_msg_with_cause("failed to convert {initial_jitter} to std::time::Duration", e) + .with_node("initial_jitter") + })?; + let interval_jitter = interval_jitter.map(duration_from_envoy).transpose().map_err(|e| { + GenericError::from_msg_with_cause("failed to convert {interval_jitter} to std::time::Duration", e) + .with_node("interval_jitter") + })?; + let interval_jitter_percent = { + let as_float = interval_jitter_percent as f32; + if !as_float.is_finite() || as_float > 1.0 || as_float.is_sign_negative() { + 
Err(GenericError::from_msg(format!(
+                        "Invalid jitter percentage {as_float}. Jitter percentage has to be within the range [0.0, 1.0]"
+                    )))
+                } else {
+                    Ok(as_float)
+                }
+            }
+            .with_node("interval_jitter_percent")?;
+            let unhealthy_threshold = required!(unhealthy_threshold)?.value;
+            let unhealthy_threshold = unhealthy_threshold
+                .try_into()
+                .map_err(|_| {
+                    GenericError::from_msg(format!("invalid value {unhealthy_threshold}. Must be less than 65536."))
+                })
+                .with_node("unhealthy_threshold")?;
+            let healthy_threshold = required!(healthy_threshold)?.value;
+            let healthy_threshold = healthy_threshold
+                .try_into()
+                .map_err(|_| {
+                    GenericError::from_msg(format!("invalid value {healthy_threshold}. Must be less than 65536."))
+                })
+                .with_node("healthy_threshold")?;
+            let unhealthy_interval = unhealthy_interval.map(duration_from_envoy).transpose().map_err(|e| {
+                GenericError::from_msg_with_cause("failed to convert unhealthy_interval to std::time::Duration", e)
+                    .with_node("unhealthy_interval")
+            })?;
+            let unhealthy_edge_interval =
+                unhealthy_edge_interval.map(duration_from_envoy).transpose().map_err(|e| {
+                    GenericError::from_msg_with_cause(
+                        "failed to convert unhealthy_edge_interval to std::time::Duration",
+                        e,
+                    )
+                    .with_node("unhealthy_edge_interval")
+                })?;
+            let healthy_edge_interval = healthy_edge_interval.map(duration_from_envoy).transpose().map_err(|e| {
+                GenericError::from_msg_with_cause("failed to convert healthy_edge_interval to std::time::Duration", e)
+                    .with_node("healthy_edge_interval")
+            })?;
+            let protocol = convert_opt!(health_checker)?;
+            Ok(Self {
+                cluster: super::ClusterHealthCheck {
+                    timeout,
+                    interval,
+                    initial_jitter,
+                    interval_jitter,
+                    interval_jitter_percent,
+                    unhealthy_threshold,
+                    healthy_threshold,
+                    unhealthy_interval,
+                    unhealthy_edge_interval,
+                    healthy_edge_interval,
+                },
+                protocol,
+            })
+        }
+    }
+
+    impl TryFrom<EnvoyHealthChecker> for HealthCheckProtocol {
+        type Error = GenericError;
+        fn try_from(value: EnvoyHealthChecker) -> Result<Self, Self::Error> {
+            match value {
+                EnvoyHealthChecker::HttpHealthCheck(envoy) => envoy.try_into().map(Self::Http),
+                EnvoyHealthChecker::TcpHealthCheck(envoy) => Ok(Self::Tcp(envoy.try_into()?)),
+                EnvoyHealthChecker::CustomHealthCheck(_) => Err(GenericError::unsupported_variant("CustomHealthCheck")),
+                EnvoyHealthChecker::GrpcHealthCheck(_) => Err(GenericError::unsupported_variant("GrpcHealthCheck")),
+            }
+        }
+    }
+
+    fn status_range_vec_from_i64_range_vec(statuses: Vec<Int64Range>) -> Result<Vec<Range<u16>>, GenericError> {
+        statuses
+            .into_iter()
+            .map(|Int64Range { start, end }| {
+                if start >= end {
+                    Err(GenericError::from_msg(format!(
+                        "invalid range [{start},{end}). End has to be greater than start"
+                    )))
+                } else if start < 100 || end >= 600 {
+                    Err(GenericError::from_msg(format!(
+                        "invalid range [{start},{end}). Range has to be within [100,600)."
+                    )))
+                } else {
+                    Ok((start as u16)..(end as u16))
+                }
+            })
+            .collect()
+    }
+
+    impl TryFrom<EnvoyHttpHealthCheck> for HttpHealthCheck {
+        type Error = GenericError;
+        fn try_from(value: EnvoyHttpHealthCheck) -> Result<Self, Self::Error> {
+            let EnvoyHttpHealthCheck {
+                host,
+                path,
+                send,
+                receive,
+                response_buffer_size,
+                request_headers_to_add,
+                request_headers_to_remove,
+                expected_statuses,
+                retriable_statuses,
+                codec_client_type,
+                service_name_matcher,
+                method,
+            } = value;
+            unsupported_field!(
+                // host,
+                // path,
+                send,
+                receive,
+                response_buffer_size,
+                request_headers_to_add,
+                request_headers_to_remove,
+                // expected_statuses,
+                // retriable_statuses,
+                // codec_client_type,
+                service_name_matcher // method
+            )?;
+            let method = RequestMethod::from_i32(method)
+                .ok_or_else(|| GenericError::unsupported_variant(format!("unknown RequestMethod value {method}")))
+                .with_node("method")?;
+            let method = match method {
+                RequestMethod::Get | RequestMethod::MethodUnspecified => Ok(Method::GET),
+                x => Err(GenericError::unsupported_variant(format!("{x:?}"))),
+            }
+            .with_node("method")?;
+            let http_version = CodecClientType::from_i32(codec_client_type)
+                .ok_or_else(|| {
+                    GenericError::unsupported_variant(format!("unknown CodecClientType value {codec_client_type}"))
+                })
+                .with_node("codec_client_type")?;
+            let http_version = match http_version {
+                CodecClientType::Http1 => Ok(Codec::Http1),
+                CodecClientType::Http2 => Ok(Codec::Http2),
+                CodecClientType::Http3 => Err(GenericError::unsupported_variant("Http3")),
+            }
+            .with_node("codec_client_type")?;
+            let host = host
+                .is_used()
+                .then(|| Authority::from_str(&host))
+                .transpose()
+                .map_err(|e| {
+                    GenericError::from_msg_with_cause(format!("Failed to convert \"{host}\" to a HeaderValue"), e)
+                })
+                .with_node("host")?;
+
+            let expected_statuses = if expected_statuses.is_empty() {
+                default_expected_statuses()
+            } else {
+                status_range_vec_from_i64_range_vec(expected_statuses).with_node("expected_statuses")?
+            };
+            let retriable_statuses =
+                status_range_vec_from_i64_range_vec(retriable_statuses).with_node("retriable_statuses")?;
+            let path = path
+                .is_used()
+                .then(|| {
+                    let path_and_query = PathAndQuery::from_str(&path).map_err(|e| {
+                        GenericError::from_msg_with_cause(format!("Failed to parse \"{path}\" as Path"), e)
+                    })?;
+                    if path_and_query.query().is_some() {
+                        Err(GenericError::from_msg("path can't contain query"))
+                    } else {
+                        Ok(path_and_query)
+                    }
+                })
+                .transpose()
+                .with_node("path")?;
+            Ok(Self { path, method, http_version, host, expected_statuses, retriable_statuses })
+        }
+    }
+
+    fn try_convert_payload(payload: EnvoyPayloadOption) -> Option<Result<Vec<u8>, GenericError>> {
+        let EnvoyPayloadOption { payload } = payload;
+
+        payload.map(|payload| match payload {
+            EnvoyPayload::Text(text) => try_convert_text_payload(&text),
+            EnvoyPayload::Binary(binary) => Ok(binary),
+        })
+    }
+
+    // The documentation doesn't specify the details of this conversion, so this is based on:
+    // https://github.com/envoyproxy/envoy/blob/v1.32.1/source/common/common/hex.cc
+    fn try_convert_text_payload(text_payload: &str) -> Result<Vec<u8>, GenericError> {
+        // This check guarantees that the match below doesn't panic
+        if text_payload.len() % 2 != 0 {
+            return Err(GenericError::from_msg("invalid text payload with odd number of characters"));
+        }
+
+        let mut bytes = Vec::with_capacity(text_payload.len() / 2);
+        let mut chars = text_payload.chars();
+        loop {
+            match (chars.next(), chars.next()) {
+                (Some(msb), Some(lsb)) => bytes.push(parse_hex_chars(msb, lsb)?),
+                (Some(_), None) | (None, Some(_)) => {
+                    // This case should not happen because we check the length at the beginning
+                    unreachable!("unexpected number of characters in text payload")
+                },
+                (None, None) => break,
+            }
+        }
+
+        Ok(bytes)
+    }
+
+    fn parse_hex_chars(msb: char, lsb: char) -> Result<u8, GenericError> {
+        // char::to_digit(16) is better than u8::from_str_radix(s, 16) because the latter accepts an initial '+'
+        match (msb.to_digit(16), lsb.to_digit(16)) {
+            (Some(msb), Some(lsb)) if msb <= 0xf && lsb <= 0xf => {
+                // this cast is valid because the match checks the upper bound
+                #[allow(clippy::cast_possible_truncation)]
+                let byte = ((msb << 4) + lsb) as u8;
+                Ok(byte)
+            },
+            _ => Err(GenericError::from_msg("invalid text payload")),
+        }
+    }
+
+    impl TryFrom<EnvoyTcpHealthCheck> for TcpHealthCheck {
+        type Error = GenericError;
+
+        fn try_from(value: EnvoyTcpHealthCheck) -> Result<Self, Self::Error> {
+            let EnvoyTcpHealthCheck { send, receive, proxy_protocol_config: _ } = value;
+
+            Ok(TcpHealthCheck {
+                send: send.and_then(try_convert_payload).transpose()?,
+                receive: receive.into_iter().filter_map(try_convert_payload).collect::<Result<Vec<_>, _>>()?,
+            })
+        }
+    }
+
+    impl TryFrom<EnvoyGrpcHealthCheck> for GrpcHealthCheck {
+        type Error = GenericError;
+        fn try_from(value: EnvoyGrpcHealthCheck) -> Result<Self, Self::Error> {
+            let EnvoyGrpcHealthCheck { service_name, authority, initial_metadata } = value;
+            unsupported_field!(authority, initial_metadata)?;
+
+            Ok(GrpcHealthCheck { service_name: service_name.into() })
+        }
+    }
+
+    #[cfg(test)]
+    mod health_check_tests {
+        use crate::config::cluster::health_check::envoy_conversions::try_convert_text_payload;
+
+        fn assert_parsed(payload: &str, expected: &[u8]) {
+            assert_eq!(try_convert_text_payload(payload).expect("payload parsing failed"), expected);
+        }
+
+        fn assert_err(payload: &str) {
+            assert!(try_convert_text_payload(payload).is_err(), "payload should be invalid");
+        }
+
+        #[test]
+        fn text_payload_parsing() {
+            assert_parsed("", &[]);
+
+            for byte in 0..=u8::MAX {
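+                // every byte value should round-trip through its two-digit lower-case hex form
+                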
assert_parsed(&format!("{byte:02x}"), &[byte]); + } + + assert_parsed("0000", &[0x0, 0x0]); + assert_parsed("0001", &[0x0, 0x1]); + assert_parsed("abba", &[0xab, 0xba]); + + assert_err("0"); + assert_err("000"); + assert_err("00000"); + assert_err("+000"); + assert_err("-000"); + assert_err("zz"); + } + } +} diff --git a/orion-configuration/src/config/cluster/http_protocol_options.rs b/orion-configuration/src/config/cluster/http_protocol_options.rs new file mode 100644 index 00000000..a39e7b5b --- /dev/null +++ b/orion-configuration/src/config/cluster/http_protocol_options.rs @@ -0,0 +1,398 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use crate::config::common::is_default; +use serde::{Deserialize, Serialize}; +use std::{num::NonZeroU32, time::Duration}; + +#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, Default)] +#[serde(rename_all = "UPPERCASE")] +pub enum Codec { + #[default] + Http1, + Http2, +} + +impl Codec { + pub fn is_http1(&self) -> bool { + matches!(self, Self::Http1) + } + pub fn is_http2(&self) -> bool { + matches!(self, Self::Http2) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)] +pub struct HttpProtocolOptions { + #[serde(skip_serializing_if = "is_default", default)] + pub codec: Codec, + #[serde(skip_serializing_if = "is_default", default, flatten)] + pub common: CommonHttpOptions, + #[serde(skip_serializing_if = "is_default", default)] + pub http2_options: Http2ProtocolOptions, + #[serde(skip_serializing_if = "is_default", default)] + pub http1_options: Http1ProtocolOptions, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)] +pub struct CommonHttpOptions { + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + #[serde(with = "humantime_serde")] + pub idle_timeout: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +enum UpstreamHttpProtocolOptions { + Explicit(ExplicitProtocolOptions), +} + +impl Default for UpstreamHttpProtocolOptions { + fn default() -> Self { + Self::Explicit(ExplicitProtocolOptions::default()) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(tag = "http_version", rename_all = "UPPERCASE")] +enum ExplicitProtocolOptions { + Http1(Http1ProtocolOptions), + Http2(Http2ProtocolOptions), +} + +impl Default for ExplicitProtocolOptions { + fn default() -> Self { + Self::Http1(Http1ProtocolOptions::default()) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, Default)] +pub struct Http1ProtocolOptions; + +#[derive(Debug, Clone, Default, Deserialize, Serialize, PartialEq, Eq)] +pub struct Http2ProtocolOptions { + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub keep_alive_settings: Option, + // Envoy limits this to 2^31-1, h2 
says 0 is valid + // envoy accepts up from 1. + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub max_concurrent_streams: Option, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub initial_stream_window_size: Option, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub initial_connection_window_size: Option, +} + +impl Http2ProtocolOptions { + pub fn max_concurrent_streams(&self) -> Option { + self.max_concurrent_streams.map(usize::from) + } + pub fn initial_stream_window_size(&self) -> Option { + self.initial_stream_window_size.map(u32::from) + } + pub fn initial_connection_window_size(&self) -> Option { + self.initial_connection_window_size.map(u32::from) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct Http2KeepAliveSettings { + #[serde(with = "humantime_serde")] + pub keep_alive_interval: Duration, + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub keep_alive_timeout: Option, +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{ + Codec, CommonHttpOptions, ExplicitProtocolOptions, Http1ProtocolOptions, Http2KeepAliveSettings, + Http2ProtocolOptions, HttpProtocolOptions, UpstreamHttpProtocolOptions, + }; + use crate::config::{common::*, util::duration_from_envoy}; + use orion_data_plane_api::envoy_data_plane_api::{ + envoy::{ + config::core::v3::{ + Http1ProtocolOptions as EnvoyHttp1ProtocolOptions, Http2ProtocolOptions as EnvoyHttp2ProtocolOptions, + HttpProtocolOptions as EnvoyCommonHttpProtocolOptions, KeepaliveSettings, + }, + extensions::upstreams::http::v3::{ + http_protocol_options::{ + explicit_http_config::ProtocolConfig as EnvoyProtocolConfig, + ExplicitHttpConfig as EnvoyExplicitHttpConfig, + UpstreamProtocolOptions as EnvoyUpstreamProtocolOptions, + }, + HttpProtocolOptions as EnvoyHttpProtocolOptions, + }, + }, + google::protobuf::Any, + prost::Message, + }; + + pub(crate) enum SupportedEnvoyProtocolOptions { + HttpProtocolOptions(EnvoyHttpProtocolOptions), + } + + impl TryFrom for SupportedEnvoyProtocolOptions { + type Error = GenericError; + fn try_from(typed_config: Any) -> Result { + match typed_config.type_url.as_str() { + "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions" => { + EnvoyHttpProtocolOptions::decode(typed_config.value.as_slice()) + .map(Self::HttpProtocolOptions) + .map_err(|e| { + GenericError::from_msg_with_cause( + format!("failed to parse protobuf for \"{}\"", typed_config.type_url), + e, + ) + }) + }, + s => Err(GenericError::unsupported_variant(s.to_owned())), + } + } + } + + impl TryFrom for HttpProtocolOptions { + type Error = GenericError; + fn try_from(envoy: Any) -> Result { + SupportedEnvoyProtocolOptions::try_from(envoy)?.try_into() + } + } + + impl TryFrom for HttpProtocolOptions { + type Error = GenericError; + fn try_from(value: SupportedEnvoyProtocolOptions) -> Result { + match value { + SupportedEnvoyProtocolOptions::HttpProtocolOptions(x) => x.try_into(), + } + } + } + + impl TryFrom for CommonHttpOptions { + type Error = GenericError; + fn try_from(value: EnvoyCommonHttpProtocolOptions) -> Result { + let EnvoyCommonHttpProtocolOptions { + idle_timeout, + max_connection_duration, + max_headers_count, + max_stream_duration, + headers_with_underscores_action, + max_requests_per_connection, + max_response_headers_kb, + } = value; + 
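+            // The exhaustive destructuring above is deliberate: adding a field to the Envoy
+            // protobuf makes this conversion fail to compile until the new field is either
+            // listed in unsupported_field! below or actually handled (the commented-out names
+            // in the macro invocation mark fields that are consumed further down).
+            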
unsupported_field!( + // idle_timeout, + max_connection_duration, + max_headers_count, + max_stream_duration, + headers_with_underscores_action, + max_requests_per_connection, + max_response_headers_kb + )?; + let idle_timeout = idle_timeout + .map(duration_from_envoy) + .transpose() + .map_err(|_| GenericError::from_msg("Failed to convert to duration"))?; + Ok(Self { idle_timeout }) + } + } + + impl TryFrom for HttpProtocolOptions { + type Error = GenericError; + fn try_from(value: EnvoyHttpProtocolOptions) -> Result { + let EnvoyHttpProtocolOptions { + common_http_protocol_options, + upstream_http_protocol_options, + http_filters, + header_validation_config, + upstream_protocol_options, + } = value; + unsupported_field!( + // common_http_protocol_options, + upstream_http_protocol_options, + http_filters, + header_validation_config // upstream_protocol_options + )?; + let upstream_protocol_options = upstream_protocol_options + .map(UpstreamHttpProtocolOptions::try_from) + .transpose() + .with_node("upstream_protocol_options")? + .unwrap_or_default(); + let common = common_http_protocol_options.map(CommonHttpOptions::try_from).transpose()?.unwrap_or_default(); + let (codec, http1_options, http2_options) = match upstream_protocol_options { + UpstreamHttpProtocolOptions::Explicit(ExplicitProtocolOptions::Http1(http1)) => { + (Codec::Http1, http1, Default::default()) + }, + UpstreamHttpProtocolOptions::Explicit(ExplicitProtocolOptions::Http2(http2)) => { + (Codec::Http2, Default::default(), http2) + }, + }; + + Ok(Self { common, codec, http1_options, http2_options }) + } + } + + impl TryFrom for UpstreamHttpProtocolOptions { + type Error = GenericError; + fn try_from(value: EnvoyUpstreamProtocolOptions) -> Result { + match value { + EnvoyUpstreamProtocolOptions::ExplicitHttpConfig(envoy) => envoy.try_into().map(Self::Explicit), + EnvoyUpstreamProtocolOptions::AutoConfig(_) => Err(GenericError::unsupported_variant("AutoConfig")), + EnvoyUpstreamProtocolOptions::UseDownstreamProtocolConfig(_) => { + Err(GenericError::unsupported_variant("UseDownstreamProtocolConfig")) + }, + } + } + } + + impl TryFrom for ExplicitProtocolOptions { + type Error = GenericError; + fn try_from(value: EnvoyExplicitHttpConfig) -> Result { + let EnvoyExplicitHttpConfig { protocol_config } = value; + convert_opt!(protocol_config) + } + } + + impl TryFrom for ExplicitProtocolOptions { + type Error = GenericError; + fn try_from(value: EnvoyProtocolConfig) -> Result { + match value { + EnvoyProtocolConfig::HttpProtocolOptions(envoy) => envoy.try_into().map(Self::Http1), + EnvoyProtocolConfig::Http2ProtocolOptions(envoy) => envoy.try_into().map(Self::Http2), + EnvoyProtocolConfig::Http3ProtocolOptions(_) => { + Err(GenericError::unsupported_variant("Http3ProtocolOptions")) + }, + } + } + } + + impl TryFrom for Http1ProtocolOptions { + type Error = GenericError; + fn try_from(value: EnvoyHttp1ProtocolOptions) -> Result { + let EnvoyHttp1ProtocolOptions { + allow_absolute_url, + accept_http_10, + default_host_for_http_10, + header_key_format, + enable_trailers, + allow_chunked_length, + override_stream_error_on_invalid_http_message, + send_fully_qualified_url, + use_balsa_parser, + allow_custom_methods, + ignore_http_11_upgrade, + } = value; + unsupported_field!( + allow_absolute_url, + accept_http_10, + default_host_for_http_10, + header_key_format, + enable_trailers, + allow_chunked_length, + override_stream_error_on_invalid_http_message, + send_fully_qualified_url, + use_balsa_parser, + allow_custom_methods, + 
ignore_http_11_upgrade + )?; + + Ok(Self {}) + } + } + + impl TryFrom for Http2ProtocolOptions { + type Error = GenericError; + fn try_from(value: EnvoyHttp2ProtocolOptions) -> Result { + let EnvoyHttp2ProtocolOptions { + hpack_table_size, + max_concurrent_streams, + initial_stream_window_size, + initial_connection_window_size, + allow_connect, + allow_metadata, + max_outbound_frames, + max_outbound_control_frames, + max_consecutive_inbound_frames_with_empty_payload, + max_inbound_priority_frames_per_stream, + max_inbound_window_update_frames_per_data_frame_sent, + stream_error_on_invalid_http_messaging, + override_stream_error_on_invalid_http_message, + custom_settings_parameters, + connection_keepalive, + use_oghttp2_codec, + max_metadata_size, + } = value; + unsupported_field!( + hpack_table_size, + // max_concurrent_streams, + // initial_stream_window_size, + // initial_connection_window_size, + allow_connect, + allow_metadata, + max_outbound_frames, + max_outbound_control_frames, + max_consecutive_inbound_frames_with_empty_payload, + max_inbound_priority_frames_per_stream, + max_inbound_window_update_frames_per_data_frame_sent, + stream_error_on_invalid_http_messaging, + override_stream_error_on_invalid_http_message, + custom_settings_parameters, + // connection_keepalive, + use_oghttp2_codec, + max_metadata_size + )?; + let max_concurrent_streams = max_concurrent_streams.map(|v| v.value as usize); + let initial_stream_window_size = initial_stream_window_size + .map(|v| v.value.try_into()) + .transpose() + .map_err(|_| GenericError::from_msg("value can't be 0")) + .with_node("initial_stream_window_size")?; + let initial_connection_window_size = initial_connection_window_size + .map(|v| v.value.try_into()) + .transpose() + .map_err(|_| GenericError::from_msg("value can't be 0")) + .with_node("initial_connection_window_size")?; + let keep_alive_settings = connection_keepalive + .map(|KeepaliveSettings { interval, timeout, interval_jitter, connection_idle_interval }| { + unsupported_field!(interval_jitter, connection_idle_interval)?; + Ok(Http2KeepAliveSettings { + keep_alive_interval: duration_from_envoy(required!(interval)?) + .map_err(|_| GenericError::from_msg("failed to convert into Duration")) + .with_node("keep_alive_interval")?, + keep_alive_timeout: timeout + .map(duration_from_envoy) + .transpose() + .map_err(|_| GenericError::from_msg("failed to convert into Duration")) + .with_node("keep_alive_timeout")?, + }) + }) + .transpose() + .with_node("keep_alive_settings")?; + + Ok(Self { + keep_alive_settings, + max_concurrent_streams, + initial_stream_window_size, + initial_connection_window_size, + }) + } + } +} diff --git a/orion-configuration/src/config/common.rs b/orion-configuration/src/config/common.rs new file mode 100644 index 00000000..fea0a125 --- /dev/null +++ b/orion-configuration/src/config/common.rs @@ -0,0 +1,371 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use regex::Regex; +use std::{ + borrow::Cow, + error::Error, + fmt::{Debug, Display}, +}; + +pub(crate) fn is_default(value: &T) -> bool { + *value == T::default() +} + +pub(crate) trait RegexExtension { + fn matches_full(&self, to_match: &str) -> bool; +} + +impl RegexExtension for Regex { + fn matches_full(&self, to_match: &str) -> bool { + Some(to_match.len()) == self.find_at(to_match, 0).map(|find_result| find_result.len()) + } +} + +enum TraceNode { + Field(Cow<'static, str>), + Name(Cow<'static, str>), + Index(usize), +} + +impl Display for TraceNode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + TraceNode::Field(field) => f.write_str(field), + TraceNode::Name(name) => f.write_str(&format!("[\"{name}\"]")), + TraceNode::Index(index) => f.write_str(&format!("[{index}]")), + } + } +} + +struct FieldTrace { + vec: Vec, +} + +impl Display for FieldTrace { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let is_named = self.vec.iter().map(|field| matches!(field, TraceNode::Name(_))); + let inner_is_named = is_named.rev().skip(1).chain(std::iter::once(false)); + + let mut iter = self.vec.iter().rev().zip(inner_is_named); + if let Some((first, _)) = iter.next() { + first.fmt(f)?; + } + for (field, inner_is_named) in iter { + if matches!(field, TraceNode::Field(_)) { + f.write_str(" / ")?; + field.fmt(f)?; + } else if matches!(field, TraceNode::Name(_)) || !inner_is_named { + f.write_str(" ")?; + field.fmt(f)?; + } + } + Ok(()) + } +} + +impl Debug for FieldTrace { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + (&self as &dyn Display).fmt(f) + } +} + +impl FieldTrace { + fn new() -> Self { + Self { vec: Vec::new() } + } + + fn push(&mut self, value: TraceNode) { + self.vec.push(value); + } +} + +impl>> From for FieldTrace { + fn from(value: T) -> Self { + Self { vec: vec![TraceNode::Field(value.into())] } + } +} + +#[derive(thiserror::Error, Debug)] +#[allow(private_interfaces)] +pub enum GenericError { + #[error("Error parsing field {0}")] + TracedError(FieldTrace, #[source] Box), + #[error("{0}")] + MessageWithCause(Cow<'static, str>, #[source] Box), + #[error("{0}")] + Message(Cow<'static, str>), + #[error("Unsupported enum variant {0}")] + UnsupportedVariant(Cow<'static, str>), + #[error("Missing field: {0}")] + MissingField(&'static str), + #[error("Unsupported field: {0}")] + UnsupportedField(&'static str), +} + +impl GenericError { + #[must_use] + pub(crate) fn with_node>>(self, node: T) -> Self { + self.with_trace_node(TraceNode::Field(node.into())) + } + + #[must_use] + pub(crate) fn with_index(self, index: usize) -> Self { + self.with_trace_node(TraceNode::Index(index)) + } + + #[must_use] + pub(crate) fn with_name>>(self, name: T) -> Self { + self.with_trace_node(TraceNode::Name(name.into())) + } + + #[must_use] + fn with_trace_node(self, node: TraceNode) -> Self { + match self { + Self::TracedError(mut fields, error) => { + fields.push(node); + Self::TracedError(fields, error) + }, + other => { + let mut fields = FieldTrace::new(); + fields.push(node); + Self::TracedError(fields, other.into()) + }, + } + } + + pub(crate) fn unsupported_variant>>(variant: T) -> Self { + Self::UnsupportedVariant(variant.into()) + } + + pub fn from_msg>>(msg: T) -> Self { + Self::Message(msg.into()) + } + + pub fn from_msg_with_cause>, E: Error + Send + Sync + 'static>(msg: T, cause: E) -> 
Self {
+        Self::MessageWithCause(msg.into(), cause.into())
+    }
+}
+
+pub(crate) trait WithNodeOnResult {
+    fn with_node<T: Into<Cow<'static, str>>>(self, node: T) -> Self;
+    fn with_index(self, index: usize) -> Self;
+    fn with_name<T: Into<Cow<'static, str>>>(self, name: T) -> Self;
+}
+
+impl<T> WithNodeOnResult for Result<T, GenericError> {
+    fn with_node<Node: Into<Cow<'static, str>>>(self, node: Node) -> Self {
+        self.map_err(|e| e.with_node(node))
+    }
+
+    fn with_index(self, index: usize) -> Self {
+        self.map_err(|e| e.with_index(index))
+    }
+
+    fn with_name<Node: Into<Cow<'static, str>>>(self, name: Node) -> Self {
+        self.map_err(|e| e.with_name(name))
+    }
+}
+
+#[cfg(feature = "envoy-conversions")]
+pub use envoy_conversions::*;
+
+#[cfg(feature = "envoy-conversions")]
+mod envoy_conversions {
+    use super::*;
+    use std::{collections::HashMap, hash::BuildHasher};
+
+    /// Trait that checks if an envoy field was explicitly set by the user.
+    /// Used to check that the user isn't using unsupported fields.
+    ///
+    /// For options this is straightforwardly "is_some", but for other fields it
+    /// gets a bit more complicated.
+    ///
+    /// Integers, and enums represented by integers, are not `Option` but plain `i32`
+    /// and will be initialized to their default value (0) if not set.
+    /// The same goes for bools, which become false.
+    /// Therefore we should always support this default value, since we can't tell if the user
+    /// explicitly requested this behaviour or if it was default-filled.
+    ///
+    /// Strings and collections will be set to their empty equivalent.
+    pub trait IsUsed {
+        type Checked;
+        fn is_used(&self) -> bool;
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError>;
+    }
+
+    impl<T> IsUsed for Option<T> {
+        type Checked = T;
+        fn is_used(&self) -> bool {
+            self.is_some()
+        }
+
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.ok_or(GenericError::MissingField(name))
+        }
+    }
+
+    impl<T> IsUsed for Vec<T> {
+        //non-empty vec
+        type Checked = Vec<T>;
+        fn is_used(&self) -> bool {
+            !self.is_empty()
+        }
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.is_used().then_some(self).ok_or(GenericError::MissingField(name))
+        }
+    }
+
+    impl IsUsed for String {
+        type Checked = Self;
+        fn is_used(&self) -> bool {
+            !self.is_empty()
+        }
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.is_used().then_some(self).ok_or(GenericError::MissingField(name))
+        }
+    }
+
+    impl IsUsed for bool {
+        type Checked = Self;
+        fn is_used(&self) -> bool {
+            //protobuf default is false
+            *self
+        }
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.is_used().then_some(self).ok_or(GenericError::MissingField(name))
+        }
+    }
+
+    impl IsUsed for i32 {
+        type Checked = Self;
+        fn is_used(&self) -> bool {
+            //protobuf default is zero
+            *self != 0
+        }
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.is_used().then_some(self).ok_or(GenericError::MissingField(name))
+        }
+    }
+
+    impl IsUsed for i64 {
+        type Checked = Self;
+        fn is_used(&self) -> bool {
+            //protobuf default is zero
+            *self != 0
+        }
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.is_used().then_some(self).ok_or(GenericError::MissingField(name))
+        }
+    }
+
+    impl IsUsed for u32 {
+        type Checked = Self;
+        fn is_used(&self) -> bool {
+            //protobuf default is zero
+            *self != 0
+        }
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.is_used().then_some(self).ok_or(GenericError::MissingField(name))
+        }
+    }
+
+    impl<K, V, S: BuildHasher> IsUsed for HashMap<K, V, S> {
+        type Checked = Self;
+        fn is_used(&self) -> bool {
+            !self.is_empty()
+        }
+        fn checked(self, name: &'static str) -> Result<Self::Checked, GenericError> {
+            self.is_used().then_some(self).ok_or(GenericError::MissingField(name))
+        }
+    }
+    // it would be nice to
allow for x = "y" syntax to overwrite the field name, since some fields are + // named differently in the code vs config file and having the code-local name might confuse an end-user + macro_rules! unsupported_field { + ($field:ident) => { + if $field.is_used() { + #[allow(dropping_copy_types, clippy::drop_non_drop)] + drop($field); + Err(GenericError::UnsupportedField(stringify!($field))) + } else { + #[allow(dropping_copy_types, clippy::drop_non_drop)] + drop($field); + Ok(()) + } + }; + ($field:ident, $($tail:ident),+) => { + if $field.is_used() { + #[allow(dropping_copy_types, clippy::drop_non_drop)] + drop($field); + Err(GenericError::UnsupportedField(stringify!($field))) + } else { + unsupported_field! ($($tail),+) + } + }; + + } + pub(crate) use unsupported_field; + + macro_rules! required { + ($field:ident) => { + $field.checked(stringify!($field)) + }; + } + pub(crate) use required; + + macro_rules! convert_opt { + ($field:ident) => { + match $field { + None => Err(GenericError::MissingField(stringify!($field))), + Some(envoy) => envoy.try_into().map_err(|e: GenericError| e.with_node(stringify!($field))), + } + }; + ($field:ident, $field_name:expr) => { + match $field { + None => Err(GenericError::MissingField($field_name)), + Some(envoy) => envoy.try_into().map_err(|e: GenericError| e.with_node($field_name)), + } + }; + } + pub(crate) use convert_opt; + + macro_rules! convert_non_empty_vec { + ($field:ident) => { + if !$field.is_used() { + Err(GenericError::MissingField(stringify!($field))) + } else { + convert_vec!($field) + } + }; + } + pub(crate) use convert_non_empty_vec; + + macro_rules! convert_vec { + ($field:ident) => { + $field + .into_iter() + .enumerate() + .map(|(index, envoy)| envoy.try_into().with_index(index)) + .collect::, GenericError>>() + .map_err(|e: GenericError| e.with_node(stringify!($field))) + }; + } + pub(crate) use convert_vec; +} diff --git a/orion-configuration/src/config/core.rs b/orion-configuration/src/config/core.rs new file mode 100644 index 00000000..586896d6 --- /dev/null +++ b/orion-configuration/src/config/core.rs @@ -0,0 +1,382 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// + +use crate::config::common::*; +use base64::engine::general_purpose::STANDARD; +use base64_serde::base64_serde_type; +use compact_str::CompactString; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::Debug, + io::{BufRead, BufReader, Read}, +}; +base64_serde_type!(Base64Standard, STANDARD); + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum DataSource { + Path(CompactString), + InlineBytes(#[serde(with = "Base64Standard")] Vec), + InlineString(CompactString), + EnvironmentVariable(CompactString), +} + +#[derive(thiserror::Error, Debug)] +pub enum DataSourceReadError { + #[error("failed to read file \"{0}\"")] + IoError(CompactString, #[source] std::io::Error), + #[error("failed to read environment variable \"{0}\"")] + EnvError(CompactString, #[source] std::env::VarError), +} + +impl DataSource { + pub fn to_bytes_blocking(&self) -> Result, DataSourceReadError> { + match self { + Self::InlineString(b) => Ok(b.as_bytes().to_owned()), + Self::InlineBytes(b) => Ok(b.clone()), + Self::Path(path) => std::fs::read(path).map_err(|e| DataSourceReadError::IoError(path.clone(), e)), + Self::EnvironmentVariable(key) => { + std::env::var(key).map(String::into_bytes).map_err(|e| DataSourceReadError::EnvError(key.clone(), e)) + }, + } + } + + pub fn into_buf_read(&self) -> Result, DataSourceReadError> { + DataSourceReader::new(self) + } +} + +pub enum DataSourceReader<'a> { + Path(BufReader), + InlineBytes(&'a [u8]), + OwnedBytes { bytes: Box<[u8]>, read: usize }, +} + +impl<'a> DataSourceReader<'a> { + pub fn new(inner: &'a DataSource) -> Result { + Ok(match inner { + DataSource::EnvironmentVariable(_) => { + let bytes = inner.to_bytes_blocking()?.into_boxed_slice(); + Self::OwnedBytes { bytes, read: 0 } + }, + DataSource::InlineString(s) => Self::InlineBytes(s.as_bytes()), + DataSource::InlineBytes(b) => Self::InlineBytes(b.as_slice()), + DataSource::Path(p) => { + let reader = + BufReader::new(std::fs::File::open(p).map_err(|e| DataSourceReadError::IoError(p.clone(), e))?); + Self::Path(reader) + }, + }) + } +} + +impl<'a> Read for DataSourceReader<'a> { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + match self { + Self::OwnedBytes { bytes, read } => { + let avail_source = bytes.len() - *read; + let avail_target = buf.len(); + let copied = avail_source.min(avail_target); + buf[..copied].copy_from_slice(&bytes[*read..(*read + copied)]); + *read += copied; + Ok(copied) + }, + Self::InlineBytes(b) => b.read(buf), + Self::Path(reader) => reader.read(buf), + } + } +} + +impl<'a> BufRead for DataSourceReader<'a> { + fn fill_buf(&mut self) -> std::io::Result<&[u8]> { + match self { + Self::OwnedBytes { bytes, read } => Ok(&bytes[*read..]), + Self::InlineBytes(b) => b.fill_buf(), + Self::Path(reader) => reader.fill_buf(), + } + } + + fn consume(&mut self, amt: usize) { + match self { + Self::OwnedBytes { bytes: _, read } => *read += amt, + Self::InlineBytes(b) => b.consume(amt), + Self::Path(reader) => reader.consume(amt), + } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct StringMatcher { + // does not apply to regex + // https://www.envoyproxy.io/docs/envoy/latest/api-v3/type/matcher/v3/string.proto#type-matcher-v3-stringmatcher + #[serde(skip_serializing_if = "std::ops::Not::not", default = "Default::default")] + pub ignore_case: bool, + #[serde(flatten)] + pub pattern: StringMatcherPattern, +} + +pub(crate) struct CaseSensitive<'a>(pub bool, pub &'a str); 
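+// A minimal usage sketch of the matcher below (the test module and values are illustrative
+// additions, not part of the Envoy-derived API): the first tuple field is the case-sensitivity
+// flag, the second the haystack.
+#[cfg(test)]
+mod case_sensitive_tests {
+    use super::CaseSensitive;
+
+    #[test]
+    fn ascii_case_insensitive_matching() {
+        let lax = CaseSensitive(false, "Foo.Example.COM");
+        assert!(lax.starts_with("foo."));
+        assert!(lax.ends_with("example.com"));
+        assert_eq!(lax.find("EXAMPLE"), Some(4));
+
+        let strict = CaseSensitive(true, "Foo.Example.COM");
+        assert!(!strict.ends_with("example.com"));
+        assert!(strict.contains("Example"));
+    }
+}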
+impl<'a> CaseSensitive<'a> { + #[inline] + pub fn equals(&self, b: &str) -> bool { + if self.0 { + self.1 == b + } else { + self.1.eq_ignore_ascii_case(b) + } + } + + #[inline] + pub fn starts_with(&self, prefix: &str) -> bool { + if self.0 { + self.1.starts_with(prefix) + } else { + prefix.len() <= self.1.len() && prefix.eq_ignore_ascii_case(&self.1[..prefix.len()]) + } + } + + #[inline] + pub fn ends_with(&self, suffix: &str) -> bool { + if self.0 { + self.1.ends_with(suffix) + } else { + let slen = suffix.len(); + slen <= self.1.len() && suffix.eq_ignore_ascii_case(&self.1[self.1.len() - slen..]) + } + } + + #[inline] + pub fn find(&self, needle: &str) -> Option { + if self.0 { + self.1.find(needle) + } else { + if needle.len() <= self.1.len() { + for i in 0..=(self.1.len() - needle.len()) { + if self.1[i..i + needle.len()].eq_ignore_ascii_case(needle) { + return Some(i); + } + } + } + None + } + } + + #[inline] + pub fn contains(&self, needle: &str) -> bool { + self.find(needle).is_some() + } +} + +impl StringMatcher { + pub fn matches(&self, to_match: &str) -> bool { + let casematcher = CaseSensitive(!self.ignore_case, to_match); + match &self.pattern { + StringMatcherPattern::Exact(s) => casematcher.equals(s), + StringMatcherPattern::Prefix(prefix) => casematcher.starts_with(prefix), + StringMatcherPattern::Suffix(suffix) => casematcher.ends_with(suffix), + StringMatcherPattern::Contains(needle) => casematcher.contains(needle), + StringMatcherPattern::Regex(r) => r.matches_full(to_match), + } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum StringMatcherPattern { + Exact(CompactString), + Prefix(CompactString), + Suffix(CompactString), + Contains(CompactString), + Regex(#[serde(with = "serde_regex")] Regex), +} + +impl PartialEq for StringMatcherPattern { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Regex(r1), Self::Regex(r2)) => r1.as_str().eq(r2.as_str()), + (Self::Exact(s1), Self::Exact(s2)) + | (Self::Prefix(s1), Self::Prefix(s2)) + | (Self::Suffix(s1), Self::Suffix(s2)) + | (Self::Contains(s1), Self::Contains(s2)) => s1.eq(s2), + _ => false, + } + } +} + +impl Eq for StringMatcherPattern {} + +#[cfg(feature = "envoy-conversions")] +pub(crate) use envoy_conversions::*; + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{DataSource, StringMatcher, StringMatcherPattern}; + use crate::config::common::*; + use ipnet::IpNet; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::core::v3::{ + address::Address as EnvoyAddress, data_source::Specifier as EnvoySpecifier, socket_address::PortSpecifier, + Address as EnvoyOuterAddress, CidrRange as EnvoyCidrRange, DataSource as EnvoyDataSource, + SocketAddress as EnvoySocketAddress, + }, + r#type::matcher::v3::{ + string_matcher::MatchPattern as EnvoyStringMatcherPattern, RegexMatcher as EnvoyRegexMatcher, + StringMatcher as EnvoyStringMatcher, + }, + }; + use regex::{Regex, RegexBuilder}; + use std::net::SocketAddr; + + pub struct CidrRange(IpNet); + + impl CidrRange { + pub fn into_ipnet(self) -> IpNet { + self.0 + } + } + + pub struct Address(SocketAddr); + impl Address { + pub fn into_socket_addr(self) -> SocketAddr { + self.0 + } + } + + impl TryFrom for CidrRange { + type Error = GenericError; + fn try_from(value: EnvoyCidrRange) -> Result { + let EnvoyCidrRange { address_prefix, prefix_len } = value; + let address_prefix = address_prefix.parse::().map_err(|e| { + 
GenericError::from_msg_with_cause("failed to parse \"{address_prefix}\" as an ip adress", e) + .with_node("address_prefix") + })?; + // defaults to 0 when unset + // https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/address.proto#envoy-v3-api-msg-config-core-v3-cidrrange + let prefix_len = prefix_len.map(|v| v.value).unwrap_or(0); + let prefix_len = u8::try_from(prefix_len).map_err(|_| { + GenericError::from_msg(format!("failed to convert {prefix_len} to a u8")).with_node("prefix_len") + })?; + let ip_net = IpNet::new(address_prefix, prefix_len).map_err(|e| { + GenericError::from_msg_with_cause( + format!( + "failed to make a cidr range from address_prefix {address_prefix} and prefix_len {prefix_len}" + ), + e, + ) + })?; + Ok(Self(ip_net)) + } + } + + impl TryFrom for Address { + type Error = GenericError; + fn try_from(value: EnvoyOuterAddress) -> Result { + let EnvoyOuterAddress { address } = value; + required!(address)?.try_into() + } + } + + impl TryFrom for Address { + type Error = GenericError; + fn try_from(value: EnvoyAddress) -> Result { + match value { + EnvoyAddress::SocketAddress(sock) => sock.try_into(), + EnvoyAddress::Pipe(_) => Err(GenericError::unsupported_variant("Pipe")), + EnvoyAddress::EnvoyInternalAddress(_) => Err(GenericError::unsupported_variant("EnvoyInternalAddress")), + } + } + } + + impl TryFrom for Address { + type Error = GenericError; + fn try_from(value: EnvoySocketAddress) -> Result { + let EnvoySocketAddress { + protocol, + address, + resolver_name, + ipv4_compat, + port_specifier, + network_namespace_filepath: _, + } = value; + unsupported_field!(protocol, resolver_name, ipv4_compat)?; + let address = required!(address)?; + let port_specifier = match required!(port_specifier)? { + PortSpecifier::NamedPort(_) => Err(GenericError::unsupported_variant("NamedPort")), + PortSpecifier::PortValue(port) => Ok(port), + }?; + let port = u16::try_from(port_specifier).map_err(|_| { + GenericError::from_msg(format!("failed to convert {port_specifier} to a port number")) + .with_node("port_specifier") + })?; + let ip = address.parse::().map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to parse \"{address}\" as an ip adress"), e) + .with_node("address") + })?; + Ok(Address(SocketAddr::new(ip, port))) + } + } + impl TryFrom for DataSource { + type Error = GenericError; + fn try_from(envoy: EnvoyDataSource) -> Result { + let EnvoyDataSource { specifier, watched_directory: _ } = envoy; + let specifier = required!(specifier)?; + Ok(match specifier { + EnvoySpecifier::InlineBytes(b) => Self::InlineBytes(b), + EnvoySpecifier::InlineString(s) => Self::InlineString(s.into()), + EnvoySpecifier::Filename(filename) => Self::Path(filename.into()), + EnvoySpecifier::EnvironmentVariable(var) => Self::EnvironmentVariable(var.into()), + }) + } + } + impl TryFrom for StringMatcher { + type Error = GenericError; + fn try_from(value: EnvoyStringMatcher) -> Result { + let EnvoyStringMatcher { ignore_case, match_pattern } = value; + let pattern = convert_opt!(match_pattern)?; + Ok(Self { ignore_case, pattern }) + } + } + + impl TryFrom for StringMatcherPattern { + type Error = GenericError; + fn try_from(value: EnvoyStringMatcherPattern) -> Result { + match value { + EnvoyStringMatcherPattern::Exact(s) => Ok(Self::Exact(s.into())), + EnvoyStringMatcherPattern::Contains(s) => Ok(Self::Contains(s.into())), + EnvoyStringMatcherPattern::Prefix(s) => Ok(Self::Prefix(s.into())), + EnvoyStringMatcherPattern::Suffix(s) => Ok(Self::Suffix(s.into())), + 
+                EnvoyStringMatcherPattern::SafeRegex(r) => Ok(Self::Regex(regex_from_envoy(r)?)),
+                EnvoyStringMatcherPattern::Custom(_) => {
+                    Err(GenericError::UnsupportedField("EnvoyStringMatcherPattern::Custom"))
+                },
+            }
+        }
+    }
+
+    pub fn regex_from_envoy(envoy: EnvoyRegexMatcher) -> Result<Regex, GenericError> {
+        let EnvoyRegexMatcher { regex, engine_type } = envoy;
+        unsupported_field!(engine_type)?;
+        RegexBuilder::new(&regex)
+            .build()
+            .map_err(|e| GenericError::from_msg_with_cause(format!("failed to convert \"{regex}\" into a regex"), e))
+    }
+}
diff --git a/orion-configuration/src/config/listener.rs b/orion-configuration/src/config/listener.rs
new file mode 100644
index 00000000..b88a93f6
--- /dev/null
+++ b/orion-configuration/src/config/listener.rs
@@ -0,0 +1,691 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use super::{
+    network_filters::{HttpConnectionManager, NetworkRbac, TcpProxy},
+    transport::{BindDevice, CommonTlsContext},
+    GenericError,
+};
+use compact_str::CompactString;
+use ipnet::IpNet;
+use serde::{Deserialize, Serialize, Serializer};
+use std::{
+    collections::HashMap,
+    net::{IpAddr, SocketAddr},
+    str::FromStr,
+};
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct Listener {
+    pub name: CompactString,
+    pub address: SocketAddr,
+    #[serde(with = "serde_filterchains")]
+    pub filter_chains: HashMap<FilterChainMatch, FilterChain>,
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    pub bind_device: Option<BindDevice>,
+    #[serde(skip_serializing_if = "std::ops::Not::not", default)]
+    pub with_tls_inspector: bool,
+}
+
+mod serde_filterchains {
+    use serde::Deserializer;
+
+    use crate::config::is_default;
+
+    use super::*;
+    pub fn serialize<S: Serializer>(
+        value: &HashMap<FilterChainMatch, FilterChain>,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error> {
+        fn is_default_ref(fcm: &&FilterChainMatch) -> bool {
+            is_default(*fcm)
+        }
+        #[derive(Serialize)]
+        struct SerializeAs<'a> {
+            #[serde(rename = "filterchain_match", skip_serializing_if = "is_default_ref")]
+            key: &'a FilterChainMatch,
+            #[serde(flatten)]
+            value: &'a FilterChain,
+        }
+        serializer.collect_seq(value.iter().map(|(key, value)| SerializeAs { key, value }))
+    }
+
+    pub fn deserialize<'de, D: Deserializer<'de>>(
+        deserializer: D,
+    ) -> Result<HashMap<FilterChainMatch, FilterChain>, D::Error> {
+        #[derive(Deserialize)]
+        struct DeserializeAs {
+            #[serde(rename = "filterchain_match", default)]
+            key: FilterChainMatch,
+            #[serde(flatten)]
+            value: FilterChain,
+        }
+        let kvp = Vec::<DeserializeAs>::deserialize(deserializer)?;
+        let vec_len = kvp.len();
+        let hashmap = kvp.into_iter().map(|DeserializeAs { key, value }| (key, value)).collect::<HashMap<_, _>>();
+        match hashmap.len() {
+            0 => Err(serde::de::Error::custom("Listener needs at least one filter_chain")),
+            x if x == vec_len => Ok(hashmap),
+            _ => Err(serde::de::Error::custom("all match statements in a filterchain have to be unique")),
+        }
+    }
+}
+#[derive(Debug, Clone,
Deserialize, Serialize, PartialEq, Eq)] +pub struct FilterChain { + pub name: CompactString, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub tls_config: Option, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub rbac: Vec, + pub terminal_filter: MainFilter, +} + +//todo(hayley): neater serialize/deserialize +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +pub struct ServerNameMatch { + //eg example.com + name: CompactString, + // should we also match on anything.example.com? (but not anythingexample.com) + match_subdomains: bool, +} + +impl FromStr for ServerNameMatch { + type Err = GenericError; + fn from_str(s: &str) -> Result { + // we don't check if the label is a valid hostname here, we only check for wildcards + let (match_subdomains, s) = if s.starts_with("*.") { (true, &s[1..]) } else { (false, s) }; + if s.contains('*') { + return Err(GenericError::from_msg( + "internal wildcards are not supported (Hostnames may only start with '*.')", + )); + } + // we convert the hostname to lowercase since hostnames should be matched case-insensitively + Ok(Self { name: s.to_lowercase().into(), match_subdomains }) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize, Default)] +pub struct FilterChainMatch { + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub destination_port: Option, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub destination_prefix_ranges: Vec, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub server_names: Vec, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub source_prefix_ranges: Vec, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub source_ports: Vec, +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum MatchResult { + FailedMatch, + NoRule, + Matched(u32), //todo, invert +} + +impl PartialOrd for MatchResult { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for MatchResult { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match (self, other) { + (Self::Matched(x), Self::Matched(y)) => x.cmp(y).reverse(), //inverted, lower-score means more specific match + (Self::FailedMatch, Self::FailedMatch) | (Self::NoRule, Self::NoRule) => std::cmp::Ordering::Equal, + // anything matched is better than not matched, NoRule is better than failing + (Self::Matched(_), _) | (Self::NoRule, Self::FailedMatch) => std::cmp::Ordering::Greater, + (_, Self::Matched(_)) | (Self::FailedMatch, Self::NoRule) => std::cmp::Ordering::Less, + } + } +} + +impl FilterChainMatch { + pub fn matches_destination_port(&self, port: u16) -> MatchResult { + match self.destination_port { + Some(destination) if port == destination => MatchResult::Matched(0), + Some(_) => MatchResult::FailedMatch, + None => MatchResult::NoRule, + } + } + + ///For criteria that allow ranges or wildcards, the most specific value in any of the configured filter chains that matches the incoming connection is going to be used (e.g. for SNI www.example.com the most specific match would be www.example.com, then *.example.com, then *.com, then any filter chain without server_names requirements). 
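+    // Worked example (illustrative addresses): for destination ip 10.1.2.3, a 10.0.0.0/8 range
+    // yields Matched(24) while a 10.1.2.0/24 range yields Matched(8); MatchResult orders lower
+    // scores as more specific, so .max() selects the /24 match.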
+ pub fn matches_destination_ip(&self, ip: IpAddr) -> MatchResult { + self.destination_prefix_ranges + .iter() + .map(|range| { + if range.contains(&ip) { + let bits_matched = match ip { + IpAddr::V4(_) => 32, + IpAddr::V6(_) => 128, + } - (range.prefix_len() as u32); + MatchResult::Matched(bits_matched) + } else { + MatchResult::FailedMatch + } + }) + .max() + .unwrap_or(MatchResult::NoRule) + } + + pub fn matches_server_name(&self, server_name: &str) -> MatchResult { + self.server_names + .iter() + .map(|name_match| { + if name_match.match_subdomains { + //something.example.com matching *.example.com + // trim the '*' in the matcher + if server_name.ends_with(name_match.name.as_str()) { + // the score is the amount of labels in server_name that matched on the '*' (lower is more specific) + MatchResult::Matched( + // -1 so we include and extra dot and ".bad.domain" matching "*.bad.domain" won't score equal to an exact match + server_name[0..server_name.len() - (name_match.name.len() - 1)] + .chars() + .filter(|c| *c == '.') + .count() + .try_into() + .unwrap_or(u32::MAX), + ) + } else { + MatchResult::FailedMatch + } + } else if server_name == name_match.name { + MatchResult::Matched(0) + } else { + MatchResult::FailedMatch + } + }) + .max() + .unwrap_or(MatchResult::NoRule) + } + + pub fn matches_source_port(&self, source_port: u16) -> MatchResult { + if self.source_ports.is_empty() { + MatchResult::NoRule + } else if self.source_ports.iter().any(|p| *p == source_port) { + MatchResult::Matched(0) + } else { + MatchResult::FailedMatch + } + } + + ///For criteria that allow ranges or wildcards, the most specific value in any of the configured filter chains that matches the incoming connection is going to be used (e.g. for SNI www.example.com the most specific match would be www.example.com, then *.example.com, then *.com, then any filter chain without server_names requirements). 
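+    // Tying the doc comment above to the scoring in matches_server_name: for SNI
+    // "www.example.com", an exact "www.example.com" rule scores Matched(0), "*.example.com"
+    // scores Matched(1) and "*.com" scores Matched(2), so the exact rule is preferred.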
+ pub fn matches_source_ip(&self, ip: IpAddr) -> MatchResult { + self.source_prefix_ranges + .iter() + .map(|range| { + if range.contains(&ip) { + let bits_matched = match ip { + IpAddr::V4(_) => 32, + IpAddr::V6(_) => 128, + } - u32::from(range.prefix_len()); + MatchResult::Matched(bits_matched) + } else { + MatchResult::FailedMatch + } + }) + .max() + .unwrap_or(MatchResult::NoRule) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(tag = "type")] +#[serde(rename_all = "UPPERCASE")] +pub enum MainFilter { + Http(HttpConnectionManager), + Tcp(TcpProxy), +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct TlsConfig { + #[serde(skip_serializing_if = "std::ops::Not::not", default)] + pub require_client_certificate: bool, + #[serde(flatten)] + pub common_tls_context: CommonTlsContext, +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use std::collections::HashMap; + use std::str::FromStr; + + use super::{FilterChain, FilterChainMatch, Listener, MainFilter, ServerNameMatch, TlsConfig}; + use crate::config::transport::SupportedEnvoyTransportSocket; + use crate::config::{ + common::*, + core::{Address, CidrRange}, + listener_filters::ListenerFilter, + util::{envoy_u32_to_u16, u32_to_u16}, + }; + use compact_str::CompactString; + use orion_data_plane_api::envoy_data_plane_api::{ + envoy::{ + config::{ + core::v3::TransportSocket as EnvoyTransportSocket, + listener::v3::{ + filter::ConfigType as EnvoyConfigType, Filter as EnvoyFilter, FilterChain as EnvoyFilterChain, + FilterChainMatch as EnvoyFilterChainMatch, Listener as EnvoyListener, + }, + }, + extensions::{ + filters::network::{ + http_connection_manager::v3::HttpConnectionManager as EnvoyHttpConnectionManager, + rbac::v3::Rbac as EnvoyNetworkRbac, tcp_proxy::v3::TcpProxy as EnvoyTcpProxy, + }, + transport_sockets::tls::v3::DownstreamTlsContext as EnvoyDownstreamTlsContext, + }, + }, + google::protobuf::Any, + prost::Message, + }; + + impl TryFrom for Listener { + type Error = GenericError; + fn try_from(envoy: EnvoyListener) -> Result { + let EnvoyListener { + name, + address, + additional_addresses, + stat_prefix, + filter_chains, + filter_chain_matcher, + use_original_dst, + default_filter_chain, + per_connection_buffer_limit_bytes, + metadata, + deprecated_v1, + drain_type, + listener_filters, + listener_filters_timeout, + continue_on_listener_filters_timeout, + transparent, + freebind, + socket_options, + tcp_fast_open_queue_length, + traffic_direction, + udp_listener_config, + api_listener, + connection_balance_config, + reuse_port, + enable_reuse_port, + access_log, + tcp_backlog_size, + max_connections_to_accept_per_socket_event, + bind_to_port, + enable_mptcp, + ignore_global_conn_limit, + listener_specifier, + bypass_overload_manager, + fcds_config, + } = envoy; + unsupported_field!( + // name, + // address, + additional_addresses, + stat_prefix, + // filter_chains, + filter_chain_matcher, + use_original_dst, + default_filter_chain, + per_connection_buffer_limit_bytes, + metadata, + deprecated_v1, + drain_type, + // listener_filters, + listener_filters_timeout, + continue_on_listener_filters_timeout, + transparent, + freebind, + // socket_options, + tcp_fast_open_queue_length, + traffic_direction, + udp_listener_config, + api_listener, + connection_balance_config, + reuse_port, + enable_reuse_port, + access_log, + tcp_backlog_size, + max_connections_to_accept_per_socket_event, + bind_to_port, + enable_mptcp, + 
ignore_global_conn_limit, + listener_specifier, + bypass_overload_manager, + fcds_config + )?; + let name: CompactString = required!(name)?.into(); + (|| -> Result<_, GenericError> { + let name = name.clone(); + let address = Address::into_socket_addr(convert_opt!(address)?); + let filter_chains: Vec = convert_non_empty_vec!(filter_chains)?; + let n_filter_chains = filter_chains.len(); + let filter_chains: HashMap<_, _> = filter_chains.into_iter().map(|x| x.0).collect(); + + // This is a hard requirement from Envoy as otherwise it can't pick which filterchain to use. + if filter_chains.len() != n_filter_chains { + return Err(GenericError::from_msg("filter chain contains duplicate filter_chain_match entries") + .with_node("filter_chains")); + } + let listener_filters: Vec = convert_vec!(listener_filters)?; + if listener_filters.len() > 1 { + return Err(GenericError::from_msg("at most one TLS inspector is supported as a listener filter")) + .with_node("listener_filters"); + } + let with_tls_inspector = !listener_filters.is_empty(); + let bind_device = convert_vec!(socket_options)?; + if bind_device.len() > 1 { + return Err(GenericError::from_msg("at most one bind device is supported")) + .with_node("socket_options"); + } + let bind_device = bind_device.into_iter().next(); + Ok(Self { name, address, filter_chains, bind_device, with_tls_inspector }) + }()) + .with_name(name) + } + } + + struct FilterChainWrapper((FilterChainMatch, FilterChain)); + + impl TryFrom for FilterChainWrapper { + type Error = GenericError; + fn try_from(envoy: EnvoyFilterChain) -> Result { + let EnvoyFilterChain { + filter_chain_match, + filters, + use_proxy_proto, + metadata, + transport_socket, + transport_socket_connect_timeout, + name, + } = envoy; + unsupported_field!( + // filter_chain_match, + // filters, + use_proxy_proto, + metadata, + // transport_socket, + transport_socket_connect_timeout // name, + )?; + let name: CompactString = required!(name)?.into(); + (|| -> Result<_, GenericError> { + let name = name.clone(); + let filter_chain_match = filter_chain_match + .map(FilterChainMatch::try_from) + .transpose() + .with_node("filter_chain_match")? 
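+                    // note: an absent filter_chain_match falls back to FilterChainMatch::default()
+                    // below, which reports MatchResult::NoRule for every criterion and therefore
+                    // acts as the least-specific catch-all chain.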
+ .unwrap_or_default(); + let filters = required!(filters)?; + let mut rbac = Vec::new(); + let mut main_filter = None; + for (idx, filter) in filters.into_iter().enumerate() { + let filter_name = filter.name.clone().is_used().then_some(filter.name.clone()); + match Filter::try_from(filter) { + Ok(f) => match f.filter { + SupportedEnvoyFilter::NetworkRbac(rbac_filter) => { + if main_filter.is_some() { + Err(GenericError::from_msg( + "rbac filter found after a http connection manager or tcp proxy in the same filterchain", + )) + } else { + match rbac_filter.try_into() { + Ok(rbac_filter) => { + rbac.push(rbac_filter); + Ok(()) + }, + Result::<_, GenericError>::Err(e) => Err(e), + } + } + }, + SupportedEnvoyFilter::HttpConnectionManager(http) => { + if main_filter.is_some() { + Err(GenericError::from_msg( + "multiple http connection managers or tcp proxies defined in filterchain", + )) + } else { + match http.try_into() { + Err(e) => Err(e), + Ok(http) => { + main_filter = Some(MainFilter::Http(http)); + Ok(()) + }, + } + } + }, + SupportedEnvoyFilter::TcpProxy(tcp) => { + if main_filter.is_some() { + Err(GenericError::from_msg( + "multiple http connection managers or tcp proxies defined in filterchain", + )) + } else { + match tcp.try_into() { + Err(e) => Err(e), + Ok(tcp) => { + main_filter = Some(MainFilter::Tcp(tcp)); + Ok(()) + }, + } + } + }, + }, + Err(e) => Err(e), + } + .map_err(|err| if let Some(name) = filter_name { err.with_name(name) } else { err }) + .with_index(idx) + .with_node("filters")?; + } + + let Some(terminal_filter) = main_filter else { + return Err(GenericError::from_msg("no tcp proxy or http connection manager specified for chain") + .with_node("filters")); + }; + let tls_config = transport_socket.map(TlsConfig::try_from).transpose()?; + Ok(FilterChainWrapper((filter_chain_match, FilterChain { name, rbac, terminal_filter, tls_config }))) + }()) + .with_name(name) + } + } + + impl TryFrom for FilterChainMatch { + type Error = GenericError; + fn try_from(envoy: EnvoyFilterChainMatch) -> Result { + let EnvoyFilterChainMatch { + destination_port, + prefix_ranges, + address_suffix, + suffix_len, + direct_source_prefix_ranges, + source_type, + source_prefix_ranges, + source_ports, + server_names, + transport_protocol, + application_protocols, + } = envoy; + unsupported_field!( + // destination_port, + // prefix_ranges, + address_suffix, + suffix_len, + direct_source_prefix_ranges, + source_type, + // source_prefix_ranges, + // source_ports, + // server_names, + transport_protocol, + application_protocols + )?; + let server_names = server_names + .into_iter() + .map(|s| ServerNameMatch::from_str(&s)) + .collect::, _>>() + .with_node("server_names")?; + if server_names.iter().any(|sn| sn.name == "*") { + return Err( + GenericError::from_msg("full wildcard entries ('*') are not supported").with_node("server_names") + ); + } + let destination_port = destination_port.map(envoy_u32_to_u16).transpose().with_node("destination_port")?; + let source_ports = + source_ports.into_iter().map(u32_to_u16).collect::>().with_node("source_ports")?; + let destination_prefix_ranges = prefix_ranges + .into_iter() + .map(|envoy| CidrRange::try_from(envoy).map(CidrRange::into_ipnet)) + .collect::>() + .with_node("prefix_ranges")?; + let source_prefix_ranges = source_prefix_ranges + .into_iter() + .map(|envoy| CidrRange::try_from(envoy).map(CidrRange::into_ipnet)) + .collect::>() + .with_node("source_prefix_ranges")?; + Ok(Self { server_names, destination_port, source_ports, 
destination_prefix_ranges, source_prefix_ranges })
+        }
+    }
+
+    #[derive(Debug, Clone)]
+    struct Filter {
+        #[allow(unused)]
+        pub name: Option<CompactString>,
+        pub filter: SupportedEnvoyFilter,
+    }
+
+    impl TryFrom<EnvoyFilter> for Filter {
+        type Error = GenericError;
+        fn try_from(envoy: EnvoyFilter) -> Result<Self, Self::Error> {
+            let EnvoyFilter { name, config_type } = envoy;
+            let name = name.is_used().then_some(CompactString::from(name));
+
+            let result = (|| -> Result<_, GenericError> {
+                let filter: SupportedEnvoyFilter = match required!(config_type)? {
+                    EnvoyConfigType::ConfigDiscovery(_) => Err(GenericError::unsupported_variant("ConfigDiscovery")),
+                    EnvoyConfigType::TypedConfig(typed_config) => SupportedEnvoyFilter::try_from(typed_config),
+                }
+                .with_node("config_type")?;
+                Ok(Self { name: name.clone(), filter })
+            })();
+
+            if let Some(name) = name {
+                return result.with_name(name);
+            }
+            result
+        }
+    }
+
+    #[derive(Debug, Clone)]
+    enum SupportedEnvoyFilter {
+        HttpConnectionManager(EnvoyHttpConnectionManager),
+        NetworkRbac(EnvoyNetworkRbac),
+        TcpProxy(EnvoyTcpProxy),
+    }
+
+    impl TryFrom<Any> for SupportedEnvoyFilter {
+        type Error = GenericError;
+        fn try_from(typed_config: Any) -> Result<Self, Self::Error> {
+            match typed_config.type_url.as_str() {
+                "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" => {
+                    EnvoyHttpConnectionManager::decode(typed_config.value.as_slice()).map(Self::HttpConnectionManager)
+                },
+                "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC" => {
+                    EnvoyNetworkRbac::decode(typed_config.value.as_slice()).map(Self::NetworkRbac)
+                },
+                "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy" => {
+                    EnvoyTcpProxy::decode(typed_config.value.as_slice()).map(Self::TcpProxy)
+                },
+                _ => {
+                    return Err(GenericError::unsupported_variant(typed_config.type_url));
+                },
+            }
+            .map_err(|e| {
+                GenericError::from_msg_with_cause(format!("failed to parse protobuf for \"{}\"", typed_config.type_url), e)
+            })
+        }
+    }
+    impl TryFrom<Any> for TlsConfig {
+        type Error = GenericError;
+        fn try_from(envoy: Any) -> Result<Self, Self::Error> {
+            SupportedEnvoyTransportSocket::try_from(envoy)?.try_into()
+        }
+    }
+
+    impl TryFrom<EnvoyTransportSocket> for TlsConfig {
+        type Error = GenericError;
+        fn try_from(envoy: EnvoyTransportSocket) -> Result<Self, Self::Error> {
+            let EnvoyTransportSocket { name, config_type } = envoy;
+            // The Envoy docs say that `name` has to be envoy.transport_sockets.tls or tls (deprecated),
+            // but in practice any string works; the only hard requirement is that it is non-empty.
+            // So, to maximize compatibility with Envoy's actual behaviour, we check that it's not
+            // empty and leave it at that.
+            let name = required!(name)?;
+            match required!(config_type)?
{ + orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::transport_socket::ConfigType::TypedConfig(any) => { + Self::try_from(any) + } + }.with_node("config_type").with_name(name) + } + } + + impl TryFrom for TlsConfig { + type Error = GenericError; + fn try_from(value: SupportedEnvoyTransportSocket) -> Result { + match value { + SupportedEnvoyTransportSocket::DownstreamTlsContext(x) => x.try_into(), + SupportedEnvoyTransportSocket::UpstreamTlsContext(_) => { + Err(GenericError::unsupported_variant("UpstreamTlsContext")) + }, + } + } + } + + impl TryFrom for TlsConfig { + type Error = GenericError; + fn try_from(value: EnvoyDownstreamTlsContext) -> Result { + let EnvoyDownstreamTlsContext { + common_tls_context, + require_client_certificate, + require_sni, + disable_stateful_session_resumption, + session_timeout, + ocsp_staple_policy, + full_scan_certs_on_sni_mismatch, + session_ticket_keys_type, + prefer_client_ciphers, + } = value; + unsupported_field!( + // common_tls_context, + // require_client_certificate, + require_sni, + disable_stateful_session_resumption, + session_timeout, + ocsp_staple_policy, + full_scan_certs_on_sni_mismatch, + session_ticket_keys_type, + prefer_client_ciphers + )?; + let require_client_certificate = require_client_certificate.is_some_and(|v| v.value); + let common_tls_context = convert_opt!(common_tls_context)?; + Ok(Self { require_client_certificate, common_tls_context }) + } + } +} diff --git a/orion-configuration/src/config/listener_filters.rs b/orion-configuration/src/config/listener_filters.rs new file mode 100644 index 00000000..7b0a5da9 --- /dev/null +++ b/orion-configuration/src/config/listener_filters.rs @@ -0,0 +1,125 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// + +use compact_str::CompactString; + +pub struct ListenerFilter { + pub name: CompactString, + pub config: ListenerFilterConfig, +} + +pub enum ListenerFilterConfig { + TlsInspector, +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{ListenerFilter, ListenerFilterConfig}; + use crate::config::common::*; + use compact_str::CompactString; + use orion_data_plane_api::envoy_data_plane_api::{ + envoy::{ + config::listener::v3::{ + listener_filter::ConfigType as EnvoyListenerFilterConfigType, ListenerFilter as EnvoyListenerFilter, + }, + extensions::filters::listener::tls_inspector::v3::TlsInspector as EnvoyTlsInspector, + }, + google::protobuf::Any, + prost::Message, + }; + #[derive(Debug, Clone)] + enum SupportedEnvoyListenerFilter { + TlsInspector(EnvoyTlsInspector), + } + + impl TryFrom for SupportedEnvoyListenerFilter { + type Error = GenericError; + fn try_from(typed_config: Any) -> Result { + match typed_config.type_url.as_str() { + "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" => { + EnvoyTlsInspector::decode(typed_config.value.as_slice()).map(Self::TlsInspector) + }, + _ => { + return Err(GenericError::unsupported_variant(typed_config.type_url)); + }, + } + .map_err(|e| { + GenericError::from_msg_with_cause( + format!("failed to parse protobuf for \"{}\"", typed_config.type_url), + e, + ) + }) + } + } + + impl TryFrom for ListenerFilterConfig { + type Error = GenericError; + fn try_from(typed_config: Any) -> Result { + SupportedEnvoyListenerFilter::try_from(typed_config)?.try_into() + } + } + impl TryFrom for ListenerFilter { + type Error = GenericError; + fn try_from(envoy: EnvoyListenerFilter) -> Result { + let EnvoyListenerFilter { name, filter_disabled, config_type } = envoy; + unsupported_field!(filter_disabled)?; + let name: CompactString = required!(name)?.into(); + (|| -> Result<_, GenericError> { + let config = match required!(config_type) { + Ok(EnvoyListenerFilterConfigType::ConfigDiscovery(_)) => { + Err(GenericError::unsupported_variant("ConfigDiscovery")) + }, + Ok(EnvoyListenerFilterConfigType::TypedConfig(typed_config)) => { + ListenerFilterConfig::try_from(typed_config) + }, + Err(e) => Err(e), + }?; + Ok(Self { name: name.clone(), config }) + })() + .with_node("config_type") + .with_name(name) + } + } + + impl TryFrom for ListenerFilterConfig { + type Error = GenericError; + fn try_from(value: SupportedEnvoyListenerFilter) -> Result { + match value { + SupportedEnvoyListenerFilter::TlsInspector(EnvoyTlsInspector { + enable_ja3_fingerprinting, + enable_ja4_fingerprinting, + initial_read_buffer_size, + }) => { + // both fields are optional, and unsupported, but serde_yaml requires that at least one field is populated + // so allow for enable_ja3_fingerprinting: false + unsupported_field!(initial_read_buffer_size)?; + if enable_ja3_fingerprinting.is_some_and(|b| b.value) { + return Err(GenericError::UnsupportedField("enable_ja3_fingerprinting")); + } + if enable_ja4_fingerprinting.is_some_and(|b| b.value) { + return Err(GenericError::UnsupportedField("enable_ja4_fingerprinting")); + } + Ok(Self::TlsInspector) + }, + } + } + } +} diff --git a/orion-configuration/src/config/log.rs b/orion-configuration/src/config/log.rs new file mode 100644 index 00000000..092db024 --- /dev/null +++ b/orion-configuration/src/config/log.rs @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// 
Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use tracing_subscriber::EnvFilter;
+
+#[derive(Debug, Deserialize, Serialize, Default)]
+pub struct Log {
+    #[serde(deserialize_with = "deserialize_log_level", serialize_with = "serialize_log_level")]
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    pub log_level: Option<EnvFilter>,
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    pub log_directory: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    pub log_file: Option<String>,
+}
+
+impl PartialEq for Log {
+    fn eq(&self, other: &Self) -> bool {
+        self.log_file == other.log_file
+            && self.log_directory == other.log_directory
+            && self.log_level.as_ref().map(EnvFilter::to_string) == other.log_level.as_ref().map(EnvFilter::to_string)
+    }
+}
+impl Eq for Log {}
+
+fn deserialize_log_level<'de, D>(deserializer: D) -> std::result::Result<Option<EnvFilter>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    Option::<String>::deserialize(deserializer).and_then(|maybe_string| {
+        maybe_string.map(|s| EnvFilter::builder().parse(s)).transpose().map_err(
+            |e: tracing_subscriber::filter::ParseError| {
+                serde::de::Error::custom(format!("failed to deserialize log level because of \"{e}\""))
+            },
+        )
+    })
+}
+
+fn serialize_log_level<S: Serializer>(
+    value: &Option<EnvFilter>,
+    serializer: S,
+) -> std::result::Result<S::Ok, S::Error> {
+    value.as_ref().map(EnvFilter::to_string).serialize(serializer)
+}
diff --git a/orion-configuration/src/config/network_filters.rs b/orion-configuration/src/config/network_filters.rs
new file mode 100644
index 00000000..e0123688
--- /dev/null
+++ b/orion-configuration/src/config/network_filters.rs
@@ -0,0 +1,26 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +// + +pub mod http_connection_manager; +pub use http_connection_manager::HttpConnectionManager; +pub mod network_rbac; +pub use network_rbac::NetworkRbac; +pub mod tcp_proxy; +pub use tcp_proxy::TcpProxy; diff --git a/orion-configuration/src/config/network_filters/http_connection_manager.rs b/orion-configuration/src/config/network_filters/http_connection_manager.rs new file mode 100644 index 00000000..a94d563e --- /dev/null +++ b/orion-configuration/src/config/network_filters/http_connection_manager.rs @@ -0,0 +1,1169 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +pub mod header_modifer; +use header_modifer::{HeaderModifier, HeaderValueOption}; +pub mod header_matcher; +use header_matcher::HeaderMatcher; +pub mod route; +use route::{Action, RouteMatch}; +pub mod http_filters; +use http_filters::{FilterOverride, HttpFilter}; + +use crate::config::common::*; +use compact_str::CompactString; +use exponential_backoff::Backoff; +use http::{HeaderName, HeaderValue, StatusCode}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, str::FromStr, time::Duration}; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct HttpConnectionManager { + pub codec_type: CodecType, + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default)] + pub request_timeout: Option, + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub http_filters: Vec, + pub route_specifier: RouteSpecifier, +} + +#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] +pub enum CodecType { + #[serde(rename = "auto")] + Auto, + #[serde(rename = "HTTP1")] + Http1, + #[serde(rename = "HTTP2")] + Http2, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(untagged)] +pub enum RouteSpecifier { + Rds(RdsSpecifier), + RouteConfig(RouteConfiguration), +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct RouteConfiguration { + pub name: CompactString, + #[serde(skip_serializing_if = "std::ops::Not::not", default = "Default::default")] + pub most_specific_header_mutations_wins: bool, + #[serde(skip_serializing_if = "is_default", default)] + pub response_header_modifier: HeaderModifier, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub request_headers_to_add: Vec, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + #[serde(with = "http_serde_ext::header_name::vec")] + pub request_headers_to_remove: Vec, + pub virtual_hosts: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum MatchHost { + Wildcard, + Prefix(CompactString), + Suffix(CompactString), + Exact(CompactString), +} + +impl Serialize for MatchHost { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self { + 
Self::Wildcard => serializer.serialize_str("*"),
+            Self::Exact(cs) => serializer.serialize_str(cs.as_str()),
+            Self::Prefix(cs) => serializer.serialize_str(&format!("{cs}*")),
+            Self::Suffix(cs) => serializer.serialize_str(&format!("*{cs}")),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for MatchHost {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        let cs = CompactString::deserialize(deserializer)?;
+        Self::try_from_compact_str(cs).map_err(|e| serde::de::Error::custom(format!("{e}")))
+    }
+}
+
+impl MatchHost {
+    pub fn try_from_compact_str(value: CompactString) -> Result<Self, GenericError> {
+        let _ = HeaderValue::from_str(&value)
+            .map_err(|_| GenericError::from_msg(format!("failed to parse \"{value}\" as a headervalue")))?;
+
+        if value == "*" {
+            return Ok(Self::Wildcard);
+        }
+
+        if value.chars().filter(|c| *c == '*').count() > 1 {
+            return Err(GenericError::from_msg("only one wildcard supported at the beginning or at the end"));
+        }
+
+        if let Some(host) = value.strip_prefix('*') {
+            return Ok(Self::Suffix(host.into()));
+        }
+
+        if let Some(host) = value.strip_suffix('*') {
+            return Ok(Self::Prefix(host.into()));
+        }
+
+        if value.contains('*') {
+            return Err(GenericError::from_msg("only one wildcard supported at the beginning or at the end"));
+        }
+
+        Ok(Self::Exact(value))
+    }
+}
+
+impl TryFrom<String> for MatchHost {
+    type Error = GenericError;
+    fn try_from(value: String) -> Result<Self, Self::Error> {
+        Self::try_from_compact_str(value.into())
+    }
+}
+
+impl TryFrom<&str> for MatchHost {
+    type Error = GenericError;
+    fn try_from(value: &str) -> Result<Self, Self::Error> {
+        Self::try_from_compact_str(value.into())
+    }
+}
+
+impl TryFrom<CompactString> for MatchHost {
+    type Error = GenericError;
+    fn try_from(value: CompactString) -> Result<Self, Self::Error> {
+        Self::try_from_compact_str(value)
+    }
+}
+
+impl FromStr for MatchHost {
+    type Err = GenericError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Self::try_from(s)
+    }
+}
+
+#[repr(u32)]
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+/// Score given to how the request host matches a given rule defined in the config.
+///
+/// Exact matches always get prioritized over Suffix matches. The integer payload
+/// is the number of characters of the uri.authority that the rule matched.
+/// Host/domain matching order is derived implicitly from the enum's lexicographic
+/// (variant-then-payload) ordering; the discriminant values are written out
+/// explicitly to guard against accidental reordering.
+pub enum MatchHostScoreLPM {
+    Wildcard = 0,
+    Prefix(usize) = 1,
+    Suffix(usize) = 2,
+    Exact(usize) = 3,
+}
+
+impl MatchHost {
+    pub fn eval_lpm_request<B>(&self, req: &http::Request<B>) -> Option<MatchHostScoreLPM> {
+        if let Some(header_value) = req.headers().get(http::header::HOST) {
+            let host = header_value.to_str().ok()?;
+            self.eval_lpm_host(host)
+        } else {
+            self.eval_lpm_host(req.uri().host()?)
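+            // note: the Host header takes precedence when present; the URI authority is
+            // only consulted as a fallback (e.g. absolute-form request targets, or HTTP/2
+            // where the http crate typically surfaces :authority through the URI).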
+ } + } + + pub fn eval_lpm_host(&self, mut host: &str) -> Option { + match self { + Self::Exact(h) => { + host = host.strip_suffix('.').unwrap_or(host); + (h == host).then_some(MatchHostScoreLPM::Exact(h.len())) + }, + + // Wildcard in Suffix and Prefix will not match empty strings, see + // https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#config-route-v3-virtualhost + Self::Suffix(suffix) => { + host = host.strip_suffix('.').unwrap_or(host); + (host.len() > suffix.len() && host.ends_with(suffix.as_str())) + .then_some(MatchHostScoreLPM::Suffix(suffix.len())) + }, + + Self::Prefix(prefix) => (host.len() > prefix.len() && host.starts_with(prefix.as_str())) + .then_some(MatchHostScoreLPM::Prefix(prefix.len())), + + Self::Wildcard => Some(MatchHostScoreLPM::Wildcard), + } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct VirtualHost { + pub name: CompactString, + pub domains: Vec, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub routes: Vec, + #[serde(skip_serializing_if = "is_default", default)] + pub response_header_modifier: HeaderModifier, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub request_headers_to_add: Vec, + #[serde(with = "http_serde_ext::header_name::vec")] + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub request_headers_to_remove: Vec, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub retry_policy: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct RetryPolicy { + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub retry_on: Vec, + pub num_retries: u32, + #[serde(with = "humantime_serde")] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub per_try_timeout: Option, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + #[serde(with = "http_serde_ext::status_code::vec")] + pub retriable_status_codes: Vec, + //envoy uses back_off but that's the verb + // the noun is backoff. + pub retry_backoff: RetryBackoff, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub retriable_headers: Vec, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub retriable_request_headers: Vec, +} + +impl RetryPolicy { + #[inline] + pub fn is_retriable(&self, req: &http::Request) -> bool { + // todo(haylyey): + // the docs say this field contains + // > HTTP headers which must be present in the request for retries to be attempted. + // so is the behaviour to ignore this when its empty or must the headers be present for it to retry? 
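+        // as implemented: an empty list never blocks a retry, while a non-empty list
+        // requires at least one of its matchers to match the request, e.g. a policy
+        // listing only an "x-retry-me" matcher retries just the requests carrying that header.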
+ self.retriable_request_headers.is_empty() + || self.retriable_request_headers.iter().any(|hm| hm.request_matches(req)) + } + + #[inline] + pub fn exponential_back_off(&self) -> Backoff { + Backoff::new(self.num_retries, self.retry_backoff.base_interval, self.retry_backoff.max_interval) + } + + #[inline] + pub fn per_try_timeout(&self) -> Option { + self.per_try_timeout + } + + #[inline] + pub fn num_retries(&self) -> u32 { + self.num_retries + } +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +pub struct RetryBackoff { + #[serde(with = "humantime_serde")] + pub base_interval: Duration, + #[serde(with = "humantime_serde")] + pub max_interval: Duration, +} + +impl Default for RetryBackoff { + fn default() -> Self { + Self { base_interval: Duration::from_millis(25), max_interval: Duration::from_millis(250) } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum RetryOn { + Err5xx, + GatewayError, + Reset, + ConnectFailure, + EnvoyRateLimited, + Retriable4xx, + RefusedStream, + RetriableStatusCodes, + RetriableHeaders, + Http3PostConnectFailure, +} + +impl FromStr for RetryOn { + type Err = GenericError; + fn from_str(s: &str) -> Result { + // see https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-on + // for the list of fields and their meaning + match s { + "5xx" => Ok(RetryOn::Err5xx), + "gateway-error" => Ok(RetryOn::GatewayError), + "reset" => Ok(RetryOn::Reset), + "connect-failure" => Ok(RetryOn::ConnectFailure), + "envoy-ratelimited" => Ok(RetryOn::EnvoyRateLimited), + "retriable-4xx" => Ok(RetryOn::Retriable4xx), + "refused-stream" => Ok(RetryOn::RefusedStream), + "retriable-status-codes" => Ok(RetryOn::RetriableStatusCodes), + "retriable-headers" => Ok(RetryOn::RetriableHeaders), + "http3-post-connect-failure" => Ok(RetryOn::Http3PostConnectFailure), + s => Err(GenericError::from_msg(format!("Invalid RetryOn value \"{s}\""))), + } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct Route { + #[serde(skip_serializing_if = "is_default", default)] + pub response_header_modifier: HeaderModifier, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub request_headers_to_add: Vec, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + #[serde(with = "http_serde_ext::header_name::vec")] + pub request_headers_to_remove: Vec, + #[serde(rename = "match")] + pub route_match: RouteMatch, + //todo(hayley): fix this field. Is it used correctly? key is name in higher level filter. value is overwrite (append-overwrite?) 
+ #[serde(skip_serializing_if = "HashMap::is_empty", default = "Default::default")] + pub typed_per_filter_config: std::collections::HashMap, + #[serde(flatten)] + pub action: Action, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct RdsSpecifier { + pub route_config_name: CompactString, + pub config_source: ConfigSource, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct ConfigSource { + pub config_source_specifier: ConfigSourceSpecifier, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub enum ConfigSourceSpecifier { + ADS, +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + #[inline] + fn request_uri(uri: &str) -> http::Request<()> { + http::Request::builder().uri(uri).body(()).unwrap() + } + + #[test] + fn match_host_exact() -> Result<(), GenericError> { + assert_eq!(MatchHost::from_str("www.example.com")?, MatchHost::Exact("www.example.com".into())); + + assert_eq!( + MatchHost::from_str("www.example.com")?.eval_lpm_host("www.example.com"), + Some(MatchHostScoreLPM::Exact("www.example.com".len())) + ); + + assert_eq!(MatchHost::from_str("another.example.com")?.eval_lpm_host("www.example.com"), None); + + assert_eq!( + MatchHost::from_str("localhost")?.eval_lpm_host("localhost"), + Some(MatchHostScoreLPM::Exact("localhost".len())) + ); + assert_eq!(MatchHost::from_str("localhost")?.eval_lpm_host("another"), None); + Ok(()) + } + + #[test] + fn match_host_suffix() -> Result<(), GenericError> { + assert_eq!(MatchHost::from_str("*.example.com")?, MatchHost::Suffix(".example.com".into())); + + assert_eq!( + MatchHost::from_str("*.example.com")?.eval_lpm_host("www.example.com"), + Some(MatchHostScoreLPM::Suffix(12)) + ); + + assert_eq!(MatchHost::from_str("*.example.com")?.eval_lpm_host("example.com"), None); + + Ok(()) + } + + #[test] + fn test_host_exact() { + let e: MatchHost = "test.com".parse().expect("test.com error parsing"); + assert_eq!(e, MatchHost::Exact("test.com".into())); + assert_eq!( + e.eval_lpm_request(&request_uri("http://test.com/foo/bar")), + Some(MatchHostScoreLPM::Exact("test.com".len())) + ); + assert_eq!( + e.eval_lpm_request(&request_uri("http://test.com./foo/bar")), + Some(MatchHostScoreLPM::Exact("test.com".len())) + ); + assert_eq!(e.eval_lpm_request(&request_uri("http://foo.test.com/bar")), None); + } + + #[test] + fn test_host_suffix() { + let rule: MatchHost = "*.test.com".parse().expect("*.test.com error parsing"); + assert_eq!(rule, MatchHost::Suffix(".test.com".into())); + assert_eq!( + rule.eval_lpm_request(&request_uri("http://foo.test.com/bar")), + Some(MatchHostScoreLPM::Suffix(".test.com".len())) + ); + assert_eq!( + rule.eval_lpm_request(&request_uri("http://foo.bar.test.com/foo/bar")), + Some(MatchHostScoreLPM::Suffix(".test.com".len())) + ); + assert_eq!( + rule.eval_lpm_request(&request_uri("http://foo.test.com./foo/bar")), + Some(MatchHostScoreLPM::Suffix(".test.com".len())) + ); + assert_eq!(rule.eval_lpm_request(&request_uri("http://foo.bar.test2.com/foo/bar")), None); + assert_eq!(rule.eval_lpm_request(&request_uri("http://*test.com/foo/bar")), None); + + let rule: MatchHost = "*-bar.foo.com".parse().expect("*-bar.foo.com error parsing"); + assert_eq!(rule, MatchHost::Suffix("-bar.foo.com".into())); + + assert_eq!( + rule.eval_lpm_request(&request_uri("http://baz-bar.foo.com/foo/bar")), + Some(MatchHostScoreLPM::Suffix("-bar.foo.com".len())) + ); + + assert_eq!(rule.eval_lpm_request(&request_uri("http://-bar.foo.com/foo/bar")), None); + 
}
+
+    #[test]
+    fn test_host_prefix() {
+        let rule: MatchHost = "www.test.*".parse().expect("www.test.* error parsing");
+        assert_eq!(rule, MatchHost::Prefix("www.test.".into()));
+        assert_eq!(
+            rule.eval_lpm_request(&request_uri("http://www.test.com/bar")),
+            Some(MatchHostScoreLPM::Prefix("www.test.".len()))
+        );
+        assert_eq!(
+            rule.eval_lpm_request(&request_uri("http://www.test.it/bar")),
+            Some(MatchHostScoreLPM::Prefix("www.test.".len()))
+        );
+        assert_eq!(
+            rule.eval_lpm_request(&request_uri("http://www.test.com./bar")),
+            Some(MatchHostScoreLPM::Prefix("www.test.".len()))
+        );
+
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test2.com/bar")), None);
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test./bar")), None);
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test/bar")), None);
+    }
+
+    #[test]
+    fn test_host_wildcard() {
+        let rule: MatchHost = "*".parse().expect("* error parsing");
+        assert_eq!(rule, MatchHost::Wildcard);
+
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test.com/bar")), Some(MatchHostScoreLPM::Wildcard));
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test.it/bar")), Some(MatchHostScoreLPM::Wildcard));
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test.com./bar")), Some(MatchHostScoreLPM::Wildcard));
+
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test2.com/bar")), Some(MatchHostScoreLPM::Wildcard));
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test./bar")), Some(MatchHostScoreLPM::Wildcard));
+        assert_eq!(rule.eval_lpm_request(&request_uri("http://www.test/bar")), Some(MatchHostScoreLPM::Wildcard));
+    }
+
+    #[test]
+    fn test_bad_rules() {
+        assert!("*asdf*".parse::<MatchHost>().is_err());
+        assert!("*.example.*.com".parse::<MatchHost>().is_err());
+        assert!("**".parse::<MatchHost>().is_err());
+        assert!("asdf*asdf".parse::<MatchHost>().is_err());
+        assert!("*asdf*".parse::<MatchHost>().is_err());
+        assert!("*asdf*asdf".parse::<MatchHost>().is_err());
+        assert!("asdf*asdf*".parse::<MatchHost>().is_err());
+    }
+
+    #[test]
+    fn test_host_cmp() {
+        assert!(MatchHostScoreLPM::Exact("test.com".len()) < MatchHostScoreLPM::Exact("foo.bar.test.com".len()));
+        assert!(MatchHostScoreLPM::Suffix("test.com".len()) < MatchHostScoreLPM::Suffix("foo.bar.test.com".len()));
+        assert!(MatchHostScoreLPM::Suffix("test.com".len()) < MatchHostScoreLPM::Suffix(".test.com".len()));
+        assert!(MatchHostScoreLPM::Exact("test.com".len()) > MatchHostScoreLPM::Suffix("foo.bar.test.com".len()));
+        assert_eq!(MatchHostScoreLPM::Suffix("foo.test.com".len()), MatchHostScoreLPM::Suffix("bar.test.com".len()));
+
+        assert!(MatchHostScoreLPM::Exact("test.com".len()) < MatchHostScoreLPM::Exact("foo.bar.test".len()));
+        assert!(MatchHostScoreLPM::Exact("test.com".len()) > MatchHostScoreLPM::Prefix("foo.bar.test".len()));
+        assert!(MatchHostScoreLPM::Exact("test.com".len()) > MatchHostScoreLPM::Suffix("foo.bar.test.com".len()));
+        assert!(MatchHostScoreLPM::Exact("test.com".len()) > MatchHostScoreLPM::Wildcard);
+
+        assert!(MatchHostScoreLPM::Suffix(".test.com".len()) < MatchHostScoreLPM::Exact("test.com".len()));
+        assert!(MatchHostScoreLPM::Suffix(".test.com".len()) > MatchHostScoreLPM::Prefix("foo.bar.test".len()));
+        assert!(MatchHostScoreLPM::Suffix(".test.com".len()) < MatchHostScoreLPM::Suffix("foo.bar.test.com".len()));
+        assert!(MatchHostScoreLPM::Suffix(".test.com".len()) > MatchHostScoreLPM::Wildcard);
+
+        assert!(MatchHostScoreLPM::Prefix("www.test.".len()) < MatchHostScoreLPM::Exact("test.com".len()));
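+        // the derived Ord compares the variant first and the payload second, so even the
+        // longest possible Suffix score never outranks an Exact score:
+        assert!(MatchHostScoreLPM::Exact(0) > MatchHostScoreLPM::Suffix(usize::MAX));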
assert!(MatchHostScoreLPM::Prefix("www.test.".len()) < MatchHostScoreLPM::Prefix("foo.bar.test.".len())); + assert!(MatchHostScoreLPM::Prefix("www.test.".len()) < MatchHostScoreLPM::Suffix("foo.bar.test.com".len())); + assert!(MatchHostScoreLPM::Prefix("www.test.".len()) > MatchHostScoreLPM::Wildcard); + + assert!(MatchHostScoreLPM::Wildcard < MatchHostScoreLPM::Exact("test.com".len())); + assert!(MatchHostScoreLPM::Wildcard < MatchHostScoreLPM::Prefix("foo.bar.test.".len())); + assert!(MatchHostScoreLPM::Wildcard < MatchHostScoreLPM::Suffix("foo.bar.test.com".len())); + assert!(MatchHostScoreLPM::Wildcard == MatchHostScoreLPM::Wildcard); + } +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{ + header_modifer::HeaderModifier, + http_filters::{ + router::Router, FilterConfigOverride, FilterOverride, HttpFilter, HttpFilterType, SupportedEnvoyFilter, + SupportedEnvoyHttpFilter, + }, + CodecType, ConfigSource, ConfigSourceSpecifier, HttpConnectionManager, RdsSpecifier, RetryBackoff, RetryOn, + RetryPolicy, Route, RouteConfiguration, RouteSpecifier, VirtualHost, + }; + use crate::config::{ + common::*, + util::{duration_from_envoy, http_status_from}, + }; + use compact_str::CompactString; + use http::HeaderName; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::{ + core::v3::{ + config_source::ConfigSourceSpecifier as EnvoyConfigSourceSpecifier, AggregatedConfigSource, + ConfigSource as EnvoyConfigSource, + }, + route::v3::{ + retry_policy::RetryBackOff as EnvoyRetryBackoff, RetryPolicy as EnvoyRetryPolicy, Route as EnvoyRoute, + RouteConfiguration as EnvoyRouteConfiguration, VirtualHost as EnvoyVirtualHost, + }, + }, + extensions::filters::network::http_connection_manager::v3::{ + http_connection_manager::{CodecType as EnvoyCodecType, RouteSpecifier as EnvoyRouteSpecifier}, + HttpConnectionManager as EnvoyHttpConnectionManager, Rds as EnvoyRds, + }, + }; + use std::{collections::HashMap, str::FromStr, time::Duration}; + + impl HttpConnectionManager { + fn ensure_corresponding_filter_exists( + filter_override: (&CompactString, &FilterOverride), + http_filters: &[HttpFilter], + ) -> Result<(), GenericError> { + let (name, config) = filter_override; + match http_filters.iter().find(|filter| filter.name == name) { + None => Err(GenericError::from_msg(format!("http filter \"{name}\" does not exist"))), + Some(matching_filter) => match &config.filter_settings { + None => Ok(()), + Some(x) => match (x, &matching_filter.filter) { + (FilterConfigOverride::LocalRateLimit(_), HttpFilterType::RateLimit(_)) + | (FilterConfigOverride::Rbac(_), HttpFilterType::Rbac(_)) => Ok(()), + (_, _) => Err(GenericError::from_msg(format!( + "can't override http filter \"{name}\" with a different filter type" + ))), + }, + }, + } + } + } + + impl TryFrom for HttpConnectionManager { + type Error = GenericError; + #[allow(clippy::too_many_lines)] + fn try_from(envoy: EnvoyHttpConnectionManager) -> Result { + let EnvoyHttpConnectionManager { + codec_type, + stat_prefix, + http_filters, + add_user_agent, + tracing, + common_http_protocol_options, + http_protocol_options, + http2_protocol_options, + http3_protocol_options, + server_name, + server_header_transformation, + scheme_header_transformation, + max_request_headers_kb, + stream_idle_timeout, + request_timeout, + request_headers_timeout, + drain_timeout, + delayed_close_timeout, + access_log, + access_log_flush_interval, + flush_access_log_on_new_request, + access_log_options, + 
use_remote_address, + xff_num_trusted_hops, + original_ip_detection_extensions, + early_header_mutation_extensions, + internal_address_config, + skip_xff_append, + via, + generate_request_id, + preserve_external_request_id, + always_set_request_id_in_response, + forward_client_cert_details, + set_current_client_cert_details, + proxy_100_continue, + represent_ipv4_remote_address_as_ipv4_mapped_ipv6, + upgrade_configs, + normalize_path, + merge_slashes, + path_with_escaped_slashes_action, + request_id_extension, + local_reply_config, + strip_matching_host_port, + stream_error_on_invalid_http_message, + path_normalization_options, + strip_trailing_host_dot, + proxy_status_config, + typed_header_validation_config, + append_x_forwarded_port, + add_proxy_protocol_connection_state, + route_specifier, + strip_port_mode, + http1_safe_max_connection_duration, + append_local_overload, + } = envoy; + unsupported_field!( + // codec_type, + // stat_prefix, + // http_filters, + add_user_agent, + tracing, + common_http_protocol_options, + http_protocol_options, + http2_protocol_options, + http3_protocol_options, + server_name, + server_header_transformation, + scheme_header_transformation, + max_request_headers_kb, + stream_idle_timeout, + // request_timeout, + request_headers_timeout, + drain_timeout, + delayed_close_timeout, + access_log, + access_log_flush_interval, + flush_access_log_on_new_request, + access_log_options, + use_remote_address, + xff_num_trusted_hops, + original_ip_detection_extensions, + early_header_mutation_extensions, + internal_address_config, + skip_xff_append, + via, + generate_request_id, + preserve_external_request_id, + always_set_request_id_in_response, + forward_client_cert_details, + set_current_client_cert_details, + proxy_100_continue, + represent_ipv4_remote_address_as_ipv4_mapped_ipv6, + upgrade_configs, + normalize_path, + merge_slashes, + path_with_escaped_slashes_action, + request_id_extension, + local_reply_config, + strip_matching_host_port, + stream_error_on_invalid_http_message, + path_normalization_options, + strip_trailing_host_dot, + proxy_status_config, + typed_header_validation_config, + append_x_forwarded_port, + add_proxy_protocol_connection_state, + // route_specifier, + strip_port_mode, + http1_safe_max_connection_duration, + append_local_overload + )?; + if stat_prefix.is_used() { + tracing::warn!( + "unsupported field stat_prefix used in http_connection_manager. This field will be ignored." 
+ ); + } + let codec_type = codec_type.try_into().with_node("codec")?; + let route_specifier = route_specifier.try_into()?; + let request_timeout = request_timeout + .map(duration_from_envoy) + .transpose() + .map_err(|_| GenericError::from_msg("failed to convert into Duration")) + .with_node("request_timeout")?; + let mut http_filters: Vec = convert_non_empty_vec!(http_filters)?; + match http_filters.pop() { + Some(SupportedEnvoyHttpFilter { filter: SupportedEnvoyFilter::Router(rtr), name, disabled: false }) => { + Router::try_from(rtr).with_node(name) + }, + Some(SupportedEnvoyHttpFilter { filter: SupportedEnvoyFilter::Router(_), name, disabled: true }) => { + Err(GenericError::from_msg("router cannot be disabled").with_node(name)) + }, + _ => Err(GenericError::from_msg("final filter of the chain has to be a router")), + } + .with_node("http_filters")?; + + let http_filters = convert_vec!(http_filters).with_node("http_filters")?; + + // and now we make sure to validate that any overrides specified in the routes, actually match the name and type of these filters + //todo(hayley): this check only happens when converting from envoy to our config types. + // we should make sure this check always happens when constructing, so it also happens when deserializing this struct directly. + // or maybe want to specify over-rides per filter type in the ng config struct so that the equality gets encoded in the type system + // or diverge from envoy by letting the user override filters with a different type. Doesn't seem like it's too terrible an idea + if let RouteSpecifier::RouteConfig(route_config) = &route_specifier { + for vh in &route_config.virtual_hosts { + let result = vh + .routes + .iter() + .flat_map(|r| { + r.typed_per_filter_config.iter().map(|filter_override| { + Self::ensure_corresponding_filter_exists(filter_override, &http_filters) + }) + }) + .collect::>(); + if let Err(e) = result { + return Err(e + .with_node("typed_per_filter_config") + .with_node("route") + .with_node(vh.name.clone()) + .with_node("virtual_hosts") + .with_node("route_specifier")); + } + } + } + Ok(Self { codec_type, http_filters, route_specifier, request_timeout }) + } + } + + impl TryFrom for CodecType { + type Error = GenericError; + fn try_from(envoy: EnvoyCodecType) -> Result { + match envoy { + EnvoyCodecType::Auto => Ok(Self::Auto), + EnvoyCodecType::Http1 => Ok(Self::Http1), + EnvoyCodecType::Http2 => Ok(Self::Http2), + EnvoyCodecType::Http3 => Err(GenericError::unsupported_variant("Http3")), + } + } + } + + impl TryFrom for CodecType { + type Error = GenericError; + fn try_from(value: i32) -> Result { + EnvoyCodecType::from_i32(value).ok_or(GenericError::unsupported_variant("[unknown codec type]"))?.try_into() + } + } + + impl TryFrom> for RouteSpecifier { + type Error = GenericError; + fn try_from(envoy: Option) -> Result { + Ok(match envoy { + Some(EnvoyRouteSpecifier::Rds(rds)) => { + Self::Rds(RdsSpecifier::try_from(rds).map_err(|e| e.with_node("rds"))?) + }, + Some(EnvoyRouteSpecifier::RouteConfig(envoy)) => { + Self::RouteConfig(envoy.try_into().with_node("route_config")?) 
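+                // illustrative YAML for the two accepted shapes (Envoy API field names):
+                //   route_specifier is either
+                //     rds: { route_config_name: some_routes, config_source: { ads: {} } }
+                //   or an inline route_config with virtual_hosts; note that ADS is the only
+                //   config_source accepted further below.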
+ }, + Some(EnvoyRouteSpecifier::ScopedRoutes(_)) => { + return Err(GenericError::unsupported_variant("ScopedRoutes")) + }, + None => return Err(GenericError::MissingField("rds or route_config")), + }) + } + } + + impl TryFrom for RouteConfiguration { + type Error = GenericError; + fn try_from(envoy: EnvoyRouteConfiguration) -> Result { + let EnvoyRouteConfiguration { + name, + virtual_hosts, + vhds, + internal_only_headers, + response_headers_to_add, + response_headers_to_remove, + request_headers_to_add, + request_headers_to_remove, + most_specific_header_mutations_wins, + validate_clusters, + max_direct_response_body_size_bytes, + cluster_specifier_plugins, + request_mirror_policies, + ignore_port_in_host_matching, + ignore_path_parameters_in_path_matching, + typed_per_filter_config, + metadata, + } = envoy; + unsupported_field!( + // name, + // virtual_hosts, + vhds, + internal_only_headers, + // response_headers_to_add, + // response_headers_to_remove, + // request_headers_to_add, + // request_headers_to_remove, + // most_specific_header_mutations_wins, + validate_clusters, + max_direct_response_body_size_bytes, + cluster_specifier_plugins, + request_mirror_policies, + ignore_port_in_host_matching, + ignore_path_parameters_in_path_matching, + typed_per_filter_config, + metadata + )?; + let name: CompactString = required!(name)?.into(); + (|| -> Result<_, GenericError> { + let response_headers_to_add = convert_vec!(response_headers_to_add)?; + let request_headers_to_add = convert_vec!(request_headers_to_add)?; + let response_headers_to_remove = response_headers_to_remove + .into_iter() + .map(|s| { + HeaderName::from_str(s.as_str()).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to convert \"{s}\" into HeaderName"), e) + .with_node("response_headers_to_remove") + }) + }) + .collect::, _>>()?; + let request_headers_to_remove = request_headers_to_remove + .into_iter() + .map(|s| { + HeaderName::from_str(s.as_str()).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to convert \"{s}\" into HeaderName"), e) + .with_node("request_headers_to_remove") + }) + }) + .collect::, _>>()?; + let virtual_hosts = convert_non_empty_vec!(virtual_hosts)?; + let response_header_modifier = HeaderModifier::new(response_headers_to_remove, response_headers_to_add); + Ok(Self { + name: name.clone(), + virtual_hosts, + most_specific_header_mutations_wins, + response_header_modifier, + request_headers_to_add, + request_headers_to_remove, + }) + })() + .with_name(name) + } + } + + impl TryFrom for VirtualHost { + type Error = GenericError; + fn try_from(envoy: EnvoyVirtualHost) -> Result { + let EnvoyVirtualHost { + name, + domains, + routes, + matcher, + require_tls, + virtual_clusters, + rate_limits, + request_headers_to_add, + request_headers_to_remove, + response_headers_to_add, + response_headers_to_remove, + cors, + typed_per_filter_config, + include_request_attempt_count, + include_attempt_count_in_response, + retry_policy, + retry_policy_typed_config, + hedge_policy, + include_is_timeout_retry_header, + per_request_buffer_limit_bytes, + request_mirror_policies, + metadata, + } = envoy; + unsupported_field!( + // name, + // domains, + // routes, + matcher, + require_tls, + virtual_clusters, + rate_limits, + // request_headers_to_add, + // request_headers_to_remove, + // response_headers_to_add, + // response_headers_to_remove, + cors, + typed_per_filter_config, + include_request_attempt_count, + include_attempt_count_in_response, + // retry_policy, + 
retry_policy_typed_config, + hedge_policy, + include_is_timeout_retry_header, + per_request_buffer_limit_bytes, + request_mirror_policies, + metadata + )?; + let name: CompactString = required!(name)?.into(); + (|| -> Result<_, GenericError> { + let response_headers_to_add = convert_vec!(response_headers_to_add)?; + let request_headers_to_add = convert_vec!(request_headers_to_add)?; + let response_headers_to_remove = response_headers_to_remove + .into_iter() + .map(|s| { + HeaderName::from_str(s.as_str()).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to convert \"{s}\" into HeaderName"), e) + .with_node("response_headers_to_remove") + }) + }) + .collect::, _>>()?; + let request_headers_to_remove = request_headers_to_remove + .into_iter() + .map(|s| { + HeaderName::from_str(s.as_str()).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to convert \"{s}\" into HeaderName"), e) + .with_node("request_headers_to_remove") + }) + }) + .collect::, _>>()?; + let domains = convert_non_empty_vec!(domains)?; + let routes = convert_vec!(routes)?; + + let retry_policy = retry_policy.map(RetryPolicy::try_from).transpose().with_node("retry_policy")?; + let response_header_modifier = HeaderModifier::new(response_headers_to_remove, response_headers_to_add); + Ok(Self { + name: name.clone(), + routes, + domains, + request_headers_to_add, + request_headers_to_remove, + retry_policy, + response_header_modifier, + }) + })() + .with_name(name) + } + } + + impl TryFrom for RetryPolicy { + type Error = GenericError; + fn try_from(value: EnvoyRetryPolicy) -> Result { + let EnvoyRetryPolicy { + retry_on, + num_retries, + per_try_timeout, + per_try_idle_timeout, + retry_priority, + retry_host_predicate, + retry_options_predicates, + host_selection_retry_max_attempts, + retriable_status_codes, + retry_back_off, + rate_limited_retry_back_off, + retriable_headers, + retriable_request_headers, + } = value; + unsupported_field!( + // retry_on, + // num_retries, + // per_try_timeout, + per_try_idle_timeout, + retry_priority, + retry_host_predicate, + retry_options_predicates, + host_selection_retry_max_attempts, + // retriable_status_codes, + // retry_back_off, + // retriable_headers, + // retriable_request_headers + rate_limited_retry_back_off + )?; + let retry_on = + retry_on.split(',').map(RetryOn::from_str).collect::, _>>().with_node("retry_on")?; + let num_retries = num_retries.map(|v| v.value).unwrap_or(1); + // from the docs, + // > If left unspecified, Envoy will use the global + // > :ref:`route timeout ` for the request. + // do we do that? if not we should require this field first. + // and, if we do use this field, do/should we ignore the route action timeout? 
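+            // note: the RetryBackoff fallbacks below (base_interval rounded up to at least
+            // 1ms, max_interval defaulting to 10 * base_interval, defaults of 25ms/250ms)
+            // mirror Envoy's documented retry back-off behaviour.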
+ let per_try_timeout = per_try_timeout + .map(duration_from_envoy) + .transpose() + .map_err(|_| GenericError::from_msg("failed to convert into Duration").with_node("per_try_timeout"))?; + let retriable_status_codes = retriable_status_codes + .into_iter() + .map(http_status_from) + .collect::, _>>() + .with_node("retriable_status_codes")?; + let retry_backoff = + retry_back_off.map(RetryBackoff::try_from).transpose().with_node("retry_backoff")?.unwrap_or_default(); + let retriable_headers = convert_vec!(retriable_headers)?; + let retriable_request_headers = convert_vec!(retriable_request_headers)?; + Ok(Self { + retry_on, + num_retries, + per_try_timeout, + retriable_status_codes, + retry_backoff, + retriable_request_headers, + retriable_headers, + }) + } + } + + impl TryFrom for RetryBackoff { + type Error = GenericError; + fn try_from(value: EnvoyRetryBackoff) -> Result { + let EnvoyRetryBackoff { base_interval, max_interval } = value; + //note: envoy docs says this can't be zero, but also that less than 1ms gets rounded up + // so for simplicity we just round up zero too. + let base_interval = duration_from_envoy(required!(base_interval)?) + .with_node("base_interval")? + .max(Duration::from_millis(1)); + let max_interval = max_interval + .map(duration_from_envoy) + .transpose() + .map_err(|_| GenericError::from_msg("failed to convert into Duration")) + .with_node("max_interval")? + .unwrap_or(base_interval * 10); + if max_interval < base_interval { + return Err(GenericError::from_msg(format!( + "max_interval ({}ms) is less than base_interval ({}ms)", + max_interval.as_millis(), + base_interval.as_millis() + ))); + } + Ok(Self { base_interval, max_interval }) + } + } + + impl TryFrom for Route { + type Error = GenericError; + fn try_from(envoy: EnvoyRoute) -> Result { + let EnvoyRoute { + name, + r#match, + metadata, + decorator, + typed_per_filter_config, + request_headers_to_add, + request_headers_to_remove, + response_headers_to_add, + response_headers_to_remove, + tracing, + per_request_buffer_limit_bytes, + stat_prefix, + action, + } = envoy; + unsupported_field!( + name, + // r#match, + metadata, + decorator, + // typed_per_filter_config, + // request_headers_to_add, + // request_headers_to_remove, + // response_headers_to_add, + // response_headers_to_remove, + tracing, + per_request_buffer_limit_bytes, + stat_prefix // action + )?; + let response_headers_to_add = convert_vec!(response_headers_to_add)?; + let request_headers_to_add = convert_vec!(request_headers_to_add)?; + let response_headers_to_remove = response_headers_to_remove + .into_iter() + .map(|s| { + HeaderName::from_str(s.as_str()).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to convert \"{s}\" into HeaderName"), e) + .with_node("response_headers_to_remove") + }) + }) + .collect::, _>>()?; + let request_headers_to_remove = request_headers_to_remove + .into_iter() + .map(|s| { + HeaderName::from_str(s.as_str()).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to convert \"{s}\" into HeaderName"), e) + .with_node("request_headers_to_remove") + }) + }) + .collect::, _>>()?; + let action = convert_opt!(action)?; + let route_match = convert_opt!(r#match, "match")?; + let typed_per_filter_config = { + typed_per_filter_config + .into_iter() + .map(|(name, typed_config)| { + FilterOverride::try_from(typed_config).map(|x| (CompactString::new(&name), x)).with_node(name) + }) + .collect::, GenericError>>() + } + .with_node("typed_per_filter_config")?; + let response_header_modifier = 
HeaderModifier::new(response_headers_to_remove, response_headers_to_add); + Ok(Self { + route_match, + action, + typed_per_filter_config, + request_headers_to_add, + request_headers_to_remove, + response_header_modifier, + }) + } + } + + impl TryFrom for RdsSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyRds) -> Result { + let EnvoyRds { config_source, route_config_name } = value; + let route_config_name = required!(route_config_name)?.into(); + let config_source = convert_opt!(config_source)?; + Ok(Self { route_config_name, config_source }) + } + } + impl TryFrom for ConfigSource { + type Error = GenericError; + fn try_from(value: EnvoyConfigSource) -> Result { + let EnvoyConfigSource { authorities, initial_fetch_timeout, resource_api_version, config_source_specifier } = + value; + unsupported_field!(authorities, initial_fetch_timeout, resource_api_version)?; + let config_source_specifier = convert_opt!(config_source_specifier)?; + Ok(Self { config_source_specifier }) + } + } + + impl TryFrom for ConfigSourceSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyConfigSourceSpecifier) -> Result { + match value { + EnvoyConfigSourceSpecifier::Ads(AggregatedConfigSource {}) => Ok(Self::ADS), + EnvoyConfigSourceSpecifier::ApiConfigSource(_) => { + Err(GenericError::unsupported_variant("ApiConfigSource")) + }, + EnvoyConfigSourceSpecifier::Path(_) => Err(GenericError::unsupported_variant("Path")), + EnvoyConfigSourceSpecifier::PathConfigSource(_) => { + Err(GenericError::unsupported_variant("PathConfigSource")) + }, + EnvoyConfigSourceSpecifier::Self_(_) => Err(GenericError::unsupported_variant("Self_")), + } + } + } +} diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/header_matcher.rs b/orion-configuration/src/config/network_filters/http_connection_manager/header_matcher.rs new file mode 100644 index 00000000..a3db50a2 --- /dev/null +++ b/orion-configuration/src/config/network_filters/http_connection_manager/header_matcher.rs @@ -0,0 +1,388 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+pub use crate::config::network_filters::network_rbac::Action;
+use crate::config::{common::*, core::StringMatcher};
+use http::{HeaderMap, HeaderName, Method, Request, Response, Uri};
+use serde::{de::Visitor, Deserialize, Serialize};
+use std::str::FromStr;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum HeaderNames {
+    Method,
+    Path,
+    Scheme,
+    NormalHeader(HeaderName),
+}
+
+impl Serialize for HeaderNames {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match self {
+            Self::Method => serializer.serialize_str(":method"),
+            Self::Path => serializer.serialize_str(":path"),
+            Self::Scheme => serializer.serialize_str(":scheme"),
+            Self::NormalHeader(header) => http_serde_ext::header_name::serialize(header, serializer),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for HeaderNames {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        struct StrVisitor;
+        impl<'de> Visitor<'de> for StrVisitor {
+            type Value = HeaderNames;
+            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+                formatter.write_str("`str`")
+            }
+            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+            where
+                E: serde::de::Error,
+            {
+                match v {
+                    ":path" => Ok(HeaderNames::Path),
+                    ":method" => Ok(HeaderNames::Method),
+                    ":scheme" => Ok(HeaderNames::Scheme),
+                    s => HeaderName::from_str(s).map(HeaderNames::NormalHeader).map_err(E::custom),
+                }
+            }
+        }
+        deserializer.deserialize_str(StrVisitor)
+    }
+}
+
+impl From<HeaderName> for HeaderNames {
+    fn from(value: HeaderName) -> Self {
+        //we don't need to check for names starting with a colon here: the http crate rejects those as invalid
+        Self::NormalHeader(value)
+    }
+}
+
+impl FromStr for HeaderNames {
+    type Err = GenericError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            ":method" => Ok(Self::Method),
+            ":scheme" => Ok(Self::Scheme),
+            ":path" => Ok(Self::Path),
+            s => HeaderName::from_str(s).map(Self::NormalHeader).map_err(|e| {
+                GenericError::from_msg_with_cause(format!("couldn't convert \"{s}\" into a HeaderName"), e)
+            }),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct HeaderMatcher {
+    #[serde(rename = "name")]
+    pub header_name: HeaderNames,
+    #[serde(skip_serializing_if = "std::ops::Not::not", default = "Default::default")]
+    pub invert_match: bool,
+    #[serde(skip_serializing_if = "std::ops::Not::not", default = "Default::default")]
+    pub treat_missing_header_as_empty: bool,
+    #[serde(flatten)]
+    pub header_matcher: StringMatcher,
+}
+
+impl HeaderMatcher {
+    pub fn request_matches<B>(&self, request: &Request<B>) -> bool {
+        self.is_match(Some(request.method()), Some(request.uri()), request.headers())
+    }
+    pub fn response_matches<B>(&self, response: &Response<B>) -> bool {
+        //todo(hayley): what is the expected behaviour here? responses don't have a uri or method,
+        // but what if someone sets them in the config anyway, together with treat_missing_header_as_empty?
+ self.is_match(None, None, response.headers()) + } + /// checks if this matcher matches any of the headers in header_map + /// Header values that contain non visible-ascii characters are skipped + fn is_match(&self, method: Option<&Method>, uri: Option<&Uri>, header_map: &HeaderMap) -> bool { + match &self.header_name { + HeaderNames::Method => { + let method = match method { + None if self.treat_missing_header_as_empty => "", + Some(method) => method.as_str(), + None => { + return false; + }, + }; + self.header_matcher.matches(method) ^ self.invert_match + }, + HeaderNames::Path => { + let path = match uri { + None if self.treat_missing_header_as_empty => "", + Some(uri) => uri.path(), + None => { + return false; + }, + }; + self.header_matcher.matches(path) ^ self.invert_match + }, + HeaderNames::Scheme => { + let scheme = match uri.map(|uri| uri.scheme_str()) { + None | Some(None) if self.treat_missing_header_as_empty => "", + Some(Some(scheme)) => scheme, + None | Some(None) => { + return false; + }, + }; + self.header_matcher.matches(scheme) ^ self.invert_match + }, + HeaderNames::NormalHeader(header_name) => { + let mut header_values = header_map.get_all(header_name).into_iter().map(|hv| hv.to_str().ok()); + match header_values.next() { + //no header found + None => { + if self.treat_missing_header_as_empty { + self.header_matcher.matches("") ^ self.invert_match + } else { + // missing headers don't get inverted + false + } + }, + Some(first) => { + let first_result = first.map(|s| self.header_matcher.matches(s)).unwrap_or(false); + let any_matched = first_result + | header_values.any(|hv| hv.map(|s| self.header_matcher.matches(s)).unwrap_or(false)); + any_matched ^ self.invert_match + }, + } + }, + } + } +} + +#[cfg(test)] +mod header_matcher_tests { + use super::*; + use crate::config::core::{StringMatcher, StringMatcherPattern}; + use http::header::*; + use std::str::FromStr; + + #[test] + fn test_header_exact() { + let mut hm = HeaderMap::new(); + + let mut h = HeaderMatcher { + header_name: ACCEPT.into(), + invert_match: false, + treat_missing_header_as_empty: false, + header_matcher: StringMatcher { + ignore_case: false, + pattern: StringMatcherPattern::Exact("text/html".into()), + }, + }; + + hm.insert(ACCEPT, HeaderValue::from_static("text/html")); + + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + + assert!(h.request_matches(&req)); + + h.header_matcher = + StringMatcher { ignore_case: false, pattern: StringMatcherPattern::Exact("text/json".into()) }; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + + assert!(!h.request_matches(&req)); + } + + #[test] + fn test_missing() { + let mut hm = HeaderMap::new(); + + let mut h = HeaderMatcher { + header_name: ACCEPT.into(), + invert_match: false, + treat_missing_header_as_empty: false, + header_matcher: StringMatcher { ignore_case: false, pattern: StringMatcherPattern::Exact("".into()) }, + }; + + h.treat_missing_header_as_empty = true; + h.invert_match = false; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(h.request_matches(&req)); + + h.treat_missing_header_as_empty = false; + h.invert_match = false; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + + 
h.treat_missing_header_as_empty = false; + h.invert_match = true; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + + h.treat_missing_header_as_empty = true; + h.invert_match = true; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + + h.header_matcher = + StringMatcher { ignore_case: false, pattern: StringMatcherPattern::Exact("not empty".into()) }; + + h.treat_missing_header_as_empty = true; + h.invert_match = false; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + + h.treat_missing_header_as_empty = false; + h.invert_match = false; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + + h.treat_missing_header_as_empty = false; + h.invert_match = true; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + + h.treat_missing_header_as_empty = true; + h.invert_match = true; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(h.request_matches(&req)); + + hm.insert(ACCEPT, HeaderValue::from_static("not empty")); + h.treat_missing_header_as_empty = true; + h.invert_match = false; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(h.request_matches(&req)); + + h.treat_missing_header_as_empty = false; + h.invert_match = false; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(h.request_matches(&req)); + + h.treat_missing_header_as_empty = false; + h.invert_match = true; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + + h.treat_missing_header_as_empty = true; + h.invert_match = true; + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + let req = builder.body(()).unwrap(); + assert!(!h.request_matches(&req)); + } + + #[test] + fn test_header_regex() { + let mut hm = HeaderMap::new(); + let re = "[A-Za-z]+/[A-Za-z]+"; + + let h = HeaderMatcher { + header_name: ACCEPT.into(), + invert_match: false, + treat_missing_header_as_empty: false, + header_matcher: StringMatcher { + ignore_case: true, + pattern: StringMatcherPattern::Regex(regex::Regex::from_str(re).unwrap()), + }, + }; + + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + assert!(!h.request_matches(&builder.body(()).unwrap())); + + hm.insert(ACCEPT, HeaderValue::from_static("text/json")); + let mut builder = http::request::Builder::new(); + *builder.headers_mut().unwrap() = hm.clone(); + assert!(h.request_matches(&builder.body(()).unwrap())); + } +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{HeaderMatcher, HeaderNames}; + use crate::config::{ + common::*, + 
+        core::{regex_from_envoy, StringMatcher, StringMatcherPattern},
+    };
+    use orion_data_plane_api::envoy_data_plane_api::envoy::config::route::v3::{
+        header_matcher::HeaderMatchSpecifier as EnvoyHeaderMatchSpecifier, HeaderMatcher as EnvoyHeaderMatcher,
+    };
+    use std::str::FromStr;
+
+    impl TryFrom<EnvoyHeaderMatcher> for HeaderMatcher {
+        type Error = GenericError;
+        fn try_from(value: EnvoyHeaderMatcher) -> Result<Self, Self::Error> {
+            let EnvoyHeaderMatcher { name, invert_match, treat_missing_header_as_empty, header_match_specifier } =
+                value;
+            let header_name = HeaderNames::from_str(&name).with_node("name")?;
+            let header_matcher = convert_opt!(header_match_specifier)?;
+            Ok(Self { header_name, treat_missing_header_as_empty, invert_match, header_matcher })
+        }
+    }
+
+    impl TryFrom<EnvoyHeaderMatchSpecifier> for StringMatcher {
+        type Error = GenericError;
+        fn try_from(value: EnvoyHeaderMatchSpecifier) -> Result<Self, Self::Error> {
+            match value {
+                EnvoyHeaderMatchSpecifier::StringMatch(matcher) => matcher.try_into(),
+                EnvoyHeaderMatchSpecifier::ContainsMatch(s) => {
+                    Ok(Self { ignore_case: false, pattern: StringMatcherPattern::Contains(s.into()) })
+                },
+                EnvoyHeaderMatchSpecifier::ExactMatch(s) => {
+                    Ok(Self { ignore_case: false, pattern: StringMatcherPattern::Exact(s.into()) })
+                },
+                EnvoyHeaderMatchSpecifier::SafeRegexMatch(r) => {
+                    Ok(Self { ignore_case: false, pattern: StringMatcherPattern::Regex(regex_from_envoy(r)?) })
+                },
+                EnvoyHeaderMatchSpecifier::PrefixMatch(s) => {
+                    Ok(Self { ignore_case: false, pattern: StringMatcherPattern::Prefix(s.into()) })
+                },
+                EnvoyHeaderMatchSpecifier::SuffixMatch(s) => {
+                    Ok(Self { ignore_case: false, pattern: StringMatcherPattern::Suffix(s.into()) })
+                },
+                EnvoyHeaderMatchSpecifier::RangeMatch(_) => Err(GenericError::unsupported_variant("RangeMatch")),
+                EnvoyHeaderMatchSpecifier::PresentMatch(_) => Err(GenericError::unsupported_variant("PresentMatch")),
+            }
+        }
+    }
+}
diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/header_modifer.rs b/orion-configuration/src/config/network_filters/http_connection_manager/header_modifer.rs
new file mode 100644
index 00000000..bebfcc41
--- /dev/null
+++ b/orion-configuration/src/config/network_filters/http_connection_manager/header_modifer.rs
@@ -0,0 +1,402 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
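+// A minimal usage sketch for the `HeaderModifier` defined below: removals are
+// applied before additions, so a removed header can be re-added in the same pass.
+#[cfg(test)]
+mod modifier_usage_sketch {
+    use super::*;
+    use http::header::USER_AGENT;
+
+    #[test]
+    fn remove_then_add() {
+        let modifier = HeaderModifier::new(
+            vec![USER_AGENT],
+            vec![HeaderValueOption {
+                header: HeaderKeyValue { key: USER_AGENT, value: HeaderValue::from_static("orion") },
+                append_action: HeaderAppendAction::AppendIfExistsOrAdd,
+                keep_empty_value: false,
+            }],
+        );
+        let mut map = HeaderMap::new();
+        map.insert(USER_AGENT, HeaderValue::from_static("curl/8.0"));
+        modifier.modify(&mut map);
+        // the original value is gone, the configured one is in place
+        assert_eq!(map.get(USER_AGENT), Some(&HeaderValue::from_static("orion")));
+    }
+}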
+//
+//
+
+use super::{is_default, GenericError};
+use http::{HeaderMap, HeaderName, HeaderValue};
+use serde::{Deserialize, Serialize};
+use std::str::FromStr;
+
+#[derive(Default, Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
+pub struct HeaderModifier {
+    #[serde(with = "http_serde_ext::header_name::vec", default, skip_serializing_if = "is_default")]
+    remove: Vec<HeaderName>,
+    #[serde(default, skip_serializing_if = "is_default")]
+    add: Vec<HeaderValueOption>,
+}
+
+impl HeaderModifier {
+    pub fn new(remove: Vec<HeaderName>, add: Vec<HeaderValueOption>) -> Self {
+        Self { remove, add }
+    }
+    pub fn modify(&self, header_map: &mut HeaderMap) {
+        // removals run first, then additions
+        for name in &self.remove {
+            header_map.remove(name);
+        }
+        for value in &self.add {
+            value.apply(header_map);
+        }
+    }
+}
+
+#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
+pub struct HeaderValueOption {
+    pub header: HeaderKeyValue,
+    pub append_action: HeaderAppendAction,
+    pub keep_empty_value: bool,
+}
+
+impl HeaderValueOption {
+    pub fn apply(&self, header_map: &mut HeaderMap) -> bool {
+        if self.header.value.is_empty() && !self.keep_empty_value {
+            header_map.remove(&self.header.key).is_some()
+        } else {
+            match self.append_action {
+                HeaderAppendAction::AppendIfExistsOrAdd => {
+                    header_map.append(&self.header.key, self.header.value.clone());
+                    true
+                },
+                HeaderAppendAction::AppendIfAbsent => {
+                    if header_map.get(&self.header.key).is_none() {
+                        header_map.append(&self.header.key, self.header.value.clone());
+                        true
+                    } else {
+                        false
+                    }
+                },
+                HeaderAppendAction::OverwriteIfExistsOrAdd => {
+                    header_map.insert(&self.header.key, self.header.value.clone());
+                    true
+                },
+                HeaderAppendAction::OverwriteIfExists => {
+                    if header_map.get(&self.header.key).is_some() {
+                        header_map.insert(&self.header.key, self.header.value.clone());
+                        true
+                    } else {
+                        false
+                    }
+                },
+            }
+        }
+    }
+}
+
+#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Deserialize, Serialize)]
+pub enum HeaderAppendAction {
+    AppendIfExistsOrAdd,
+    AppendIfAbsent,
+    OverwriteIfExistsOrAdd,
+    OverwriteIfExists,
+}
+
+#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)]
+pub struct HeaderKeyValue {
+    #[serde(with = "http_serde_ext::header_name")]
+    pub key: HeaderName,
+    //todo(hayley): this macro is too restrictive. It only accepts header values made of printable
+    // ascii characters, but the struct accepts opaque bytes too.
+ #[serde(with = "http_serde_ext::header_value")] + pub value: HeaderValue, +} + +impl TryFrom<(String, Vec)> for HeaderKeyValue { + type Error = GenericError; + fn try_from(value: (String, Vec)) -> Result { + let key = HeaderName::from_str(&value.0).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to parse \"{}\" as a HeaderName", value.0), e) + })?; + let value = HeaderValue::try_from(value.1) + .map_err(|e| GenericError::from_msg_with_cause("failed to parse bytes as HeaderValue", e))?; + Ok(Self { key, value }) + } +} +impl TryFrom<(String, String)> for HeaderKeyValue { + type Error = GenericError; + fn try_from(value: (String, String)) -> Result { + let key = HeaderName::from_str(&value.0).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to parse \"{}\" as a HeaderName", value.0), e) + })?; + let value = HeaderValue::from_str(&value.1).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to parse \"{}\" as a HeaderValue", value.1), e) + })?; + Ok(Self { key, value }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::network_filters::http_connection_manager::header_modifer::HeaderKeyValue; + use http::header::{COOKIE, LOCATION, USER_AGENT}; + use http::{HeaderMap, HeaderValue}; + + #[test] + fn test_header_mutation_append_if_exists_or_add() { + let header_map = &mut HeaderMap::::default(); + + let hello = HeaderValue::from_str("hello").unwrap(); + let world = HeaderValue::from_str("world").unwrap(); + + HeaderValueOption { + header: HeaderKeyValue { key: LOCATION, value: hello.clone() }, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(LOCATION), Some(&hello)); + assert_eq!(header_map.len(), 1); + + HeaderValueOption { + header: HeaderKeyValue { key: LOCATION, value: world.clone() }, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.len(), 2); + + let mut iter = header_map.get_all(LOCATION).iter(); + assert_eq!(&hello, iter.next().unwrap()); + assert_eq!(&world, iter.next().unwrap()); + assert!(iter.next().is_none()); + } + + #[test] + fn test_header_mutation_inline_append() { + let header_map = &mut HeaderMap::::default(); + + let hello = HeaderValue::from_str("hello").unwrap(); + let world = HeaderValue::from_str("world").unwrap(); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: hello.clone() }, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&hello)); + assert_eq!(header_map.len(), 1); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: world.clone() }, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.len(), 2); + } + + #[test] + fn test_header_mutation_cookie_append() { + let header_map = &mut HeaderMap::::default(); + + let hello = HeaderValue::from_str("hello").unwrap(); + let world = HeaderValue::from_str("world").unwrap(); + + HeaderValueOption { + header: HeaderKeyValue { key: COOKIE, value: hello.clone() }, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(COOKIE), Some(&hello)); + assert_eq!(header_map.len(), 1); + + HeaderValueOption { + header: HeaderKeyValue { key: COOKIE, value: world.clone() 
}, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.len(), 2); + } + + #[test] + fn test_header_mutation_append_if_absent() { + let header_map = &mut HeaderMap::::default(); + + let hello = HeaderValue::from_str("hello").unwrap(); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: hello.clone() }, + append_action: HeaderAppendAction::AppendIfAbsent, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&hello)); + assert_eq!(header_map.len(), 1); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: hello.clone() }, + append_action: HeaderAppendAction::AppendIfAbsent, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&hello)); + assert_eq!(header_map.len(), 1); + } + + #[test] + fn test_header_mutation_overwrite_if_exists_or_add() { + let header_map = &mut HeaderMap::::default(); + + let hello = HeaderValue::from_str("hello").unwrap(); + let world = HeaderValue::from_str("world").unwrap(); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: hello.clone() }, + append_action: HeaderAppendAction::OverwriteIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&hello)); + assert_eq!(header_map.len(), 1); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: world.clone() }, + append_action: HeaderAppendAction::OverwriteIfExistsOrAdd, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&world)); + assert_eq!(header_map.len(), 1); + } + + #[test] + fn test_header_mutation_overwrite_if_exists() { + let header_map = &mut HeaderMap::::default(); + + let hello = HeaderValue::from_str("hello").unwrap(); + let world = HeaderValue::from_str("world").unwrap(); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: hello.clone() }, + append_action: HeaderAppendAction::OverwriteIfExists, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), None); + assert!(header_map.is_empty()); + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: hello.clone() }, + append_action: HeaderAppendAction::AppendIfAbsent, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&hello)); + assert_eq!(header_map.len(), 1); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: world.clone() }, + append_action: HeaderAppendAction::OverwriteIfExists, + keep_empty_value: false, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&world)); + assert_eq!(header_map.len(), 1); + } + + #[test] + fn test_header_mutation_append_empty_value() { + let header_map = &mut HeaderMap::::default(); + + let empty = HeaderValue::from_str("").unwrap(); + let test = HeaderValue::from_str("test").unwrap(); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: test.clone() }, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: true, + } + .apply(header_map); + + assert_eq!(header_map.get(USER_AGENT), Some(&test)); + assert_eq!(header_map.len(), 1); + + HeaderValueOption { + header: HeaderKeyValue { key: USER_AGENT, value: empty.clone() }, + append_action: HeaderAppendAction::AppendIfExistsOrAdd, + keep_empty_value: true, + } + .apply(header_map); + + 
assert_eq!(header_map.get(USER_AGENT), Some(&test)); + assert_eq!(header_map.len(), 2); + } +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{HeaderAppendAction, HeaderKeyValue, HeaderValueOption}; + use crate::config::common::*; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::{ + header_value_option::HeaderAppendAction as EnvoyHeaderAppendAction, HeaderValue as EnvoyHeaderValue, + HeaderValueOption as EnvoyHeaderValueOption, + }; + + impl TryFrom for HeaderValueOption { + type Error = GenericError; + fn try_from(value: EnvoyHeaderValueOption) -> Result { + let EnvoyHeaderValueOption { header, append, append_action, keep_empty_value } = value; + unsupported_field!(append)?; + let header = convert_opt!(header)?; + let append_action = HeaderAppendAction::try_from(append_action).with_node("append_action")?; + Ok(Self { header, append_action, keep_empty_value }) + } + } + + impl From for HeaderAppendAction { + fn from(value: EnvoyHeaderAppendAction) -> Self { + match value { + EnvoyHeaderAppendAction::AppendIfExistsOrAdd => Self::AppendIfExistsOrAdd, + EnvoyHeaderAppendAction::AddIfAbsent => Self::AppendIfAbsent, + EnvoyHeaderAppendAction::OverwriteIfExists => Self::OverwriteIfExists, + EnvoyHeaderAppendAction::OverwriteIfExistsOrAdd => Self::OverwriteIfExistsOrAdd, + } + } + } + + impl TryFrom for HeaderAppendAction { + type Error = GenericError; + fn try_from(value: i32) -> Result { + EnvoyHeaderAppendAction::from_i32(value) + .ok_or(GenericError::unsupported_variant("[unknown header append action]")) + .map(Self::from) + } + } + + impl TryFrom for HeaderKeyValue { + type Error = GenericError; + fn try_from(value: EnvoyHeaderValue) -> Result { + let EnvoyHeaderValue { key, value, raw_value } = value; + match (value.is_used(), raw_value.is_used()) { + (true, true) => { + Err(GenericError::from_msg(format!("both value ({value}) and raw_value ({raw_value:?}) were set")) + .with_node("value")) + }, + (true, false) => Self::try_from((key, value)), + (false, true) => Self::try_from((key, raw_value)), + (false, false) => Err(GenericError::MissingField("value OR raw_value")), + } + } + } +} diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/http_filters.rs b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters.rs new file mode 100644 index 00000000..4a7343c9 --- /dev/null +++ b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters.rs @@ -0,0 +1,271 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
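+// A small semantics sketch for the override types defined below: a bare
+// `FilterConfigOverride` (the unwrapped envoy form) always yields an enabled
+// override; only the wrapped `FilterConfig` form can carry `disabled: true`.
+#[cfg(test)]
+mod filter_override_sketch {
+    use super::*;
+
+    #[test]
+    fn direct_override_is_enabled() {
+        let ovr = FilterOverride::from(FilterConfigOverride::Rbac(None));
+        assert!(!ovr.disabled);
+        assert!(ovr.filter_settings.is_some());
+    }
+}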
+//
+//
+
+pub mod http_rbac;
+use compact_str::CompactString;
+use http_rbac::HttpRbac;
+pub mod local_rate_limit;
+use local_rate_limit::LocalRateLimit;
+pub mod router;
+
+use serde::{Deserialize, Serialize};
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct FilterOverride {
+    // this field can be optional iff config is Some(_)
+    pub disabled: bool,
+    #[serde(skip_serializing_if = "is_default", default)]
+    pub filter_settings: Option<FilterConfigOverride>,
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+#[serde(untagged, rename_all = "snake_case")]
+pub enum FilterConfigOverride {
+    LocalRateLimit(LocalRateLimit),
+    // in Envoy this is a separate type, RbacPerRoute, but it only has one field named rbac with the full config,
+    // so we replace it with an Option to be more rusty
+    Rbac(Option<HttpRbac>),
+}
+
+impl From<FilterConfigOverride> for FilterOverride {
+    fn from(value: FilterConfigOverride) -> Self {
+        Self { disabled: false, filter_settings: Some(value) }
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct HttpFilter {
+    pub name: CompactString,
+    #[serde(skip_serializing_if = "is_default", default)]
+    pub disabled: bool,
+    #[serde(flatten)]
+    pub filter: HttpFilterType,
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case", tag = "filter_type", content = "filter_settings")]
+pub enum HttpFilterType {
+    Rbac(HttpRbac),
+    RateLimit(LocalRateLimit),
+}
+
+#[cfg(feature = "envoy-conversions")]
+pub(crate) use envoy_conversions::*;
+
+use super::is_default;
+
+#[cfg(feature = "envoy-conversions")]
+mod envoy_conversions {
+    #![allow(deprecated)]
+    use super::{FilterConfigOverride, FilterOverride, HttpFilter, HttpFilterType, HttpRbac};
+    use crate::config::common::*;
+    use compact_str::CompactString;
+    use orion_data_plane_api::envoy_data_plane_api::{
+        envoy::{
+            config::route::v3::FilterConfig as EnvoyFilterConfig,
+            extensions::filters::{
+                http::{
+                    local_ratelimit::v3::LocalRateLimit as EnvoyLocalRateLimit,
+                    rbac::v3::{Rbac as EnvoyRbac, RbacPerRoute as EnvoyRbacPerRoute},
+                    router::v3::Router as EnvoyRouter,
+                },
+                network::http_connection_manager::v3::{
+                    http_filter::ConfigType as EnvoyConfigType, HttpFilter as EnvoyHttpFilter,
+                },
+            },
+        },
+        google::protobuf::Any,
+        prost::Message,
+    };
+
+    #[derive(Debug, Clone)]
+    pub(crate) struct SupportedEnvoyHttpFilter {
+        pub name: CompactString,
+        pub disabled: bool,
+        pub filter: SupportedEnvoyFilter,
+    }
+
+    impl TryFrom<EnvoyHttpFilter> for SupportedEnvoyHttpFilter {
+        type Error = GenericError;
+        fn try_from(envoy: EnvoyHttpFilter) -> Result<Self, Self::Error> {
+            let EnvoyHttpFilter { name, is_optional, disabled, config_type } = envoy;
+            unsupported_field!(is_optional)?;
+            let name: CompactString = required!(name)?.into();
+            match required!(config_type).map(|x| match x {
+                EnvoyConfigType::ConfigDiscovery(_) => {
+                    Err(GenericError::unsupported_variant("ConfigDiscovery")).with_node(name.clone())
+                },
+                EnvoyConfigType::TypedConfig(typed_config) => SupportedEnvoyFilter::try_from(typed_config),
+            }) {
+                Ok(Ok(filter)) => Ok(Self { name, filter, disabled }),
+                Err(e) | Ok(Err(e)) => Err(e.with_name(name)),
+            }
+        }
+    }
+
+    impl TryFrom<SupportedEnvoyHttpFilter> for HttpFilter {
+        type Error = GenericError;
+        fn try_from(value: SupportedEnvoyHttpFilter) -> Result<Self, Self::Error> {
+            let SupportedEnvoyHttpFilter { name, disabled, filter } = value;
+            Ok(Self { name, disabled, filter: filter.try_into()?
}) + } + } + + impl TryFrom for HttpFilterType { + type Error = GenericError; + fn try_from(value: SupportedEnvoyFilter) -> Result { + match value { + SupportedEnvoyFilter::LocalRateLimit(lr) => lr.try_into().map(Self::RateLimit), + SupportedEnvoyFilter::Rbac(rbac) => rbac.try_into().map(Self::Rbac), + SupportedEnvoyFilter::Router(_) => { + Err(GenericError::from_msg("router filter has to be the last filter in the chain")) + }, + } + } + } + + #[allow(clippy::large_enum_variant)] + #[derive(Debug, Clone)] + pub(crate) enum SupportedEnvoyFilter { + LocalRateLimit(EnvoyLocalRateLimit), + Rbac(EnvoyRbac), + Router(EnvoyRouter), + } + + impl TryFrom for SupportedEnvoyFilter { + type Error = GenericError; + fn try_from(typed_config: Any) -> Result { + match typed_config.type_url.as_str() { + "type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit" => { + EnvoyLocalRateLimit::decode(typed_config.value.as_slice()).map(Self::LocalRateLimit) + }, + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC" => { + EnvoyRbac::decode(typed_config.value.as_slice()).map(Self::Rbac) + }, + "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" => { + EnvoyRouter::decode(typed_config.value.as_slice()).map(Self::Router) + }, + _ => return Err(GenericError::unsupported_variant(typed_config.type_url)), + } + .map_err(|e| { + GenericError::from_msg_with_cause( + format!("failed to parse protobuf for \"{}\"", typed_config.type_url), + e, + ) + }) + } + } + + #[derive(Debug, Clone)] + #[allow(clippy::large_enum_variant)] + pub enum SupportedEnvoyFilterOverride { + LocalRateLimit(EnvoyLocalRateLimit), + Rbac(EnvoyRbacPerRoute), + } + + impl TryFrom for SupportedEnvoyFilterOverride { + type Error = GenericError; + fn try_from(typed_config: Any) -> Result { + match typed_config.type_url.as_str() { + "type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit" => { + EnvoyLocalRateLimit::decode(typed_config.value.as_slice()).map(Self::LocalRateLimit) + }, + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute" => { + EnvoyRbacPerRoute::decode(typed_config.value.as_slice()).map(Self::Rbac) + }, + _ => return Err(GenericError::unsupported_variant(typed_config.type_url)), + } + .map_err(|e| { + GenericError::from_msg_with_cause( + format!("failed to parse protobuf for \"{}\"", typed_config.type_url), + e, + ) + }) + } + } + + #[derive(Debug, Clone)] + pub enum MaybeWrappedEnvoyFilter { + Wrapped(EnvoyFilterConfig), + Direct(SupportedEnvoyFilterOverride), + } + + impl TryFrom for MaybeWrappedEnvoyFilter { + type Error = GenericError; + fn try_from(typed_config: Any) -> Result { + match typed_config.type_url.as_str() { + "type.googleapis.com/envoy.config.route.v3.FilterConfig" => { + EnvoyFilterConfig::decode(typed_config.value.as_slice()).map(Self::Wrapped).map_err(|e| { + GenericError::from_msg_with_cause( + format!("failed to parse protobuf for \"{}\"", typed_config.type_url), + e, + ) + }) + }, + _ => SupportedEnvoyFilterOverride::try_from(typed_config).map(Self::Direct), + } + } + } + + impl TryFrom for FilterConfigOverride { + type Error = GenericError; + fn try_from(value: SupportedEnvoyFilterOverride) -> Result { + match value { + SupportedEnvoyFilterOverride::LocalRateLimit(envoy) => envoy.try_into().map(Self::LocalRateLimit), + SupportedEnvoyFilterOverride::Rbac(EnvoyRbacPerRoute { rbac }) => { + rbac.map(HttpRbac::try_from).transpose().map(Self::Rbac) + }, + } + } + } + + impl TryFrom for FilterConfigOverride { + type 
Error = GenericError; + fn try_from(envoy: Any) -> Result { + let supported = SupportedEnvoyFilterOverride::try_from(envoy)?; + supported.try_into() + } + } + impl TryFrom for FilterOverride { + type Error = GenericError; + fn try_from(envoy: EnvoyFilterConfig) -> Result { + let EnvoyFilterConfig { config, is_optional, disabled } = envoy; + unsupported_field!(is_optional)?; + let filter_settings = config.map(FilterConfigOverride::try_from).transpose().with_node("config")?; + Ok(Self { disabled, filter_settings }) + } + } + + impl TryFrom for FilterOverride { + type Error = GenericError; + fn try_from(envoy: MaybeWrappedEnvoyFilter) -> Result { + match envoy { + MaybeWrappedEnvoyFilter::Direct(envoy) => FilterConfigOverride::try_from(envoy).map(Self::from), + MaybeWrappedEnvoyFilter::Wrapped(envoy) => envoy.try_into(), + } + } + } + + impl TryFrom for FilterOverride { + type Error = GenericError; + fn try_from(envoy: Any) -> Result { + MaybeWrappedEnvoyFilter::try_from(envoy)?.try_into() + } + } +} diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/http_rbac.rs b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/http_rbac.rs new file mode 100644 index 00000000..6ff36a07 --- /dev/null +++ b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/http_rbac.rs @@ -0,0 +1,273 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
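+// Semantics sketch for the `HttpRbac` type defined below: `Action::Deny`
+// permits exactly the requests that no policy matches, while `Action::Allow`
+// permits only those that some policy matches; with an empty policy list,
+// Deny permits everything and Allow permits nothing.
+#[cfg(test)]
+mod rbac_action_sketch {
+    use super::*;
+
+    #[test]
+    fn empty_policy_list() {
+        let req = http::Request::builder().body(()).unwrap();
+        let deny = HttpRbac { action: Action::Deny, policies: vec![] };
+        assert!(deny.is_permitted(&req));
+        let allow = HttpRbac { action: Action::Allow, policies: vec![] };
+        assert!(!allow.is_permitted(&req));
+    }
+}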
+//
+//
+
+use crate::config::network_filters::{http_connection_manager::header_matcher::HeaderMatcher, network_rbac::Action};
+use http::Request;
+use serde::{Deserialize, Serialize};
+use tracing::debug;
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct HttpRbac {
+    pub action: Action,
+    //todo(hayley): replace vec with std::collections::BTreeMap
+    // and include the policy name, as Envoy says to apply them
+    // in lexical order
+    pub policies: Vec<Policy>,
+}
+
+//since we support different permissions and principals for http vs network rbac, this struct differs from the network rbac one
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct Policy {
+    pub permissions: Vec<Permission>,
+    pub principals: Vec<Principal>,
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum Permission {
+    Any,
+    Header(HeaderMatcher),
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum Principal {
+    Any,
+    Header(HeaderMatcher),
+}
+
+impl Permission {
+    fn is_applicable<B>(&self, req: &Request<B>) -> bool {
+        match self {
+            Self::Any => true,
+            Self::Header(h) => h.request_matches(req),
+        }
+    }
+}
+
+impl Principal {
+    fn has_principal<B>(&self, req: &Request<B>) -> bool {
+        match self {
+            Principal::Any => true,
+            Principal::Header(h) => h.request_matches(req),
+        }
+    }
+}
+
+impl Policy {
+    fn enforce<B>(&self, req: &Request<B>) -> bool {
+        let has_permission = self.permissions.iter().any(|p| p.is_applicable(req));
+        let has_principal = self.principals.iter().any(|p| p.has_principal(req));
+        debug!("Enforcing policy: permissions {has_permission}, principals {has_principal}");
+        has_permission && has_principal
+    }
+}
+
+impl HttpRbac {
+    pub fn is_permitted<B>(&self, req: &Request<B>) -> bool {
+        let is_enforced = self.policies.iter().any(|p| p.enforce(req));
+        debug!("Rule is enforced: {is_enforced}");
+        match self.action {
+            Action::Allow => is_enforced,
+            Action::Deny => !is_enforced,
+        }
+    }
+}
+
+#[cfg(test)]
+mod rbac_tests {
+    use super::*;
+    use crate::config::core::{StringMatcher, StringMatcherPattern};
+    use http::{header::HOST, HeaderMap, HeaderValue, Request};
+    fn create_host_request(host: &str) -> Request<()> {
+        let mut hm = HeaderMap::new();
+        hm.insert(HOST, HeaderValue::from_str(host).unwrap());
+        let mut builder = http::request::Builder::new();
+        *builder.headers_mut().unwrap() = hm;
+        builder.body(()).unwrap()
+    }
+
+    fn create_host_matcher(host: &str) -> HeaderMatcher {
+        HeaderMatcher {
+            header_name: HOST.into(),
+            invert_match: false,
+            treat_missing_header_as_empty: false,
+            header_matcher: StringMatcher { ignore_case: true, pattern: StringMatcherPattern::Exact(host.into()) },
+        }
+    }
+
+    #[test]
+    fn rule_test_allow_any() {
+        let permission = Permission::Any;
+        let principal = Principal::Any;
+        let policy = Policy { permissions: vec![permission], principals: vec![principal] };
+        let rbac_rule = HttpRbac { action: Action::Allow, policies: vec![policy] };
+        assert!(rbac_rule.is_permitted(&create_host_request("blah.com")));
+    }
+    #[test]
+    fn rule_test_allow_host_permission_any_principal() {
+        let host = "blah.com";
+        let permission = Permission::Header(create_host_matcher(host));
+        let principal = Principal::Any;
+        let policy = Policy { permissions: vec![permission], principals: vec![principal] };
+        let rbac_rule = HttpRbac { action: Action::Allow, policies: vec![policy] };
+        assert!(rbac_rule.is_permitted(&create_host_request(host)));
+    }
+
+    #[test]
+    fn
rule_test_allow_host_and_any_permission_any_principal() { + let host = "blah2.com"; + let permission1 = Permission::Header(create_host_matcher(host)); + let permission2 = Permission::Any; + let principal = Principal::Any; + let policy = Policy { permissions: vec![permission1, permission2], principals: vec![principal] }; + let rbac_rule = HttpRbac { action: Action::Allow, policies: vec![policy] }; + assert!(rbac_rule.is_permitted(&create_host_request(host))); + } + + #[test] + fn rule_test_allow_host_permission_host_principal() { + let host = "blah.com"; + let permission = Permission::Header(create_host_matcher(host)); + let principal = Principal::Header(create_host_matcher(host)); + let policy = Policy { permissions: vec![permission], principals: vec![principal] }; + let rbac_rule = HttpRbac { action: Action::Allow, policies: vec![policy] }; + assert!(rbac_rule.is_permitted(&create_host_request(host))); + } + + #[test] + fn rule_test_deny_any() { + let permission = Permission::Any; + let principal = Principal::Any; + let policy = Policy { permissions: vec![permission], principals: vec![principal] }; + let rbac_rule = HttpRbac { action: Action::Deny, policies: vec![policy] }; + assert!(!rbac_rule.is_permitted(&create_host_request("blah.com"))); + } + #[test] + fn rule_test_deny_host_permission_any_principal() { + let host = "blah.com"; + let permission = Permission::Header(create_host_matcher(host)); + let principal = Principal::Any; + let policy = Policy { permissions: vec![permission], principals: vec![principal] }; + let rbac_rule = HttpRbac { action: Action::Deny, policies: vec![policy] }; + assert!(!rbac_rule.is_permitted(&create_host_request(host))); + } + + #[test] + fn rule_test_deny_host_permission_host_principal() { + let host = "blah.com"; + let permission = Permission::Header(create_host_matcher(host)); + let principal = Principal::Header(create_host_matcher(host)); + let policy = Policy { permissions: vec![permission], principals: vec![principal] }; + let rbac_rule = HttpRbac { action: Action::Deny, policies: vec![policy] }; + assert!(!rbac_rule.is_permitted(&create_host_request(host))); + } +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{HttpRbac, Permission, Policy, Principal}; + use crate::config::{common::*, network_filters::network_rbac::Action}; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::rbac::v3::{ + permission::Rule as EnvoyPermissionRule, principal::Identifier as EnvoyPrincipalIdentifier, + Permission as EnvoyPermission, Policy as EnvoyPolicy, Principal as EnvoyPrincipal, Rbac as EnvoyHttpRbac, + }, + extensions::filters::http::rbac::v3::Rbac as EnvoyRbac, + }; + + impl TryFrom for HttpRbac { + type Error = GenericError; + fn try_from(envoy: EnvoyRbac) -> Result { + let EnvoyRbac { + rules, + matcher, + shadow_rules, + shadow_matcher, + shadow_rules_stat_prefix, + rules_stat_prefix, + track_per_rule_stats, + } = envoy; + unsupported_field!( + // rules, + matcher, + shadow_rules, + shadow_matcher, + shadow_rules_stat_prefix, + rules_stat_prefix, + track_per_rule_stats + )?; + convert_opt!(rules) + } + } + + impl TryFrom for HttpRbac { + type Error = GenericError; + + fn try_from(envoy: EnvoyHttpRbac) -> Result { + let EnvoyHttpRbac { action, policies, audit_logging_options } = envoy; + unsupported_field!(audit_logging_options)?; + let action = Action::try_from(action).with_node("action")?; + let policies = required!(policies)?.into_values().map(Policy::try_from).collect::>()?; + 
Ok(HttpRbac { action, policies }) + } + } + + impl TryFrom for Policy { + type Error = GenericError; + fn try_from(envoy: EnvoyPolicy) -> Result { + let EnvoyPolicy { permissions, principals, condition, checked_condition } = envoy; + unsupported_field!(condition, checked_condition)?; + let permissions = convert_vec!(permissions)?; + let principals = convert_vec!(principals)?; + Ok(Self { permissions, principals }) + } + } + + impl TryFrom for Permission { + type Error = GenericError; + fn try_from(envoy: EnvoyPermission) -> Result { + let EnvoyPermission { rule } = envoy; + match required!(rule)? { + EnvoyPermissionRule::Any(true) => Ok(Self::Any), + EnvoyPermissionRule::Any(false) => Err(GenericError::from_msg("Any has to be true")), + EnvoyPermissionRule::Header(header) => header.try_into().map(Self::Header), + //todo(hayley): write all these out + _ => return Err(GenericError::unsupported_variant("[Unsupported Permission Rule]")), + } + .with_node("rule") + } + } + + impl TryFrom for Principal { + type Error = GenericError; + fn try_from(value: EnvoyPrincipal) -> Result { + let EnvoyPrincipal { identifier } = value; + match required!(identifier)? { + EnvoyPrincipalIdentifier::Any(true) => Ok(Self::Any), + EnvoyPrincipalIdentifier::Any(false) => Err(GenericError::from_msg("Any has to be true")), + EnvoyPrincipalIdentifier::Header(header) => header.try_into().map(Self::Header), + _ => return Err(GenericError::unsupported_variant("[Unsupported Principal Identifier]")), + } + .with_node("identifier") + } + } +} diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/local_rate_limit.rs b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/local_rate_limit.rs new file mode 100644 index 00000000..c7f79929 --- /dev/null +++ b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/local_rate_limit.rs @@ -0,0 +1,124 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
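+// Config sketch for the token bucket below: `max_tokens` bounds the burst
+// size, while `tokens_per_fill` per `fill_interval` sets the sustained rate;
+// here 10 tokens every 500ms sustains 20 requests/second.
+#[cfg(test)]
+mod rate_limit_sketch {
+    use super::*;
+    use std::time::Duration;
+
+    #[test]
+    fn sustained_rate_example() {
+        let limit = LocalRateLimit {
+            status: StatusCode::TOO_MANY_REQUESTS,
+            max_tokens: 100,
+            tokens_per_fill: 10,
+            fill_interval: Duration::from_millis(500),
+        };
+        // 10 tokens per 500ms == 20 requests/second sustained
+        assert_eq!(f64::from(limit.tokens_per_fill) / limit.fill_interval.as_secs_f64(), 20.0);
+    }
+}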
+// +// + +use http::StatusCode; +use serde::{Deserialize, Serialize}; +use std::time::Duration; + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +pub struct LocalRateLimit { + #[serde( + with = "http_serde_ext::status_code", + skip_serializing_if = "is_default_statuscode", + default = "default_statuscode_deser" + )] + pub status: StatusCode, + pub max_tokens: u32, + pub tokens_per_fill: u32, + #[serde(with = "humantime_serde")] + pub fill_interval: Duration, +} + +const DEFAULT_RATE_LIMIT_STATUSCODE: StatusCode = StatusCode::TOO_MANY_REQUESTS; +const fn default_statuscode_deser() -> StatusCode { + DEFAULT_RATE_LIMIT_STATUSCODE +} +fn is_default_statuscode(code: &StatusCode) -> bool { + *code == DEFAULT_RATE_LIMIT_STATUSCODE +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::LocalRateLimit; + use crate::config::{ + common::*, + util::{duration_from_envoy, http_status_from_envoy}, + }; + use http::StatusCode; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + extensions::filters::http::local_ratelimit::v3::LocalRateLimit as EnvoyLocalRateLimit, + r#type::v3::TokenBucket as EnvoyTokenBucket, + }; + impl TryFrom for LocalRateLimit { + type Error = GenericError; + fn try_from(value: EnvoyLocalRateLimit) -> Result { + let EnvoyLocalRateLimit { + stat_prefix, + status, + token_bucket, + filter_enabled, + filter_enforced, + request_headers_to_add_when_not_enforced, + response_headers_to_add, + descriptors, + stage, + local_rate_limit_per_downstream_connection, + enable_x_ratelimit_headers, + vh_rate_limits, + always_consume_default_token_bucket, + rate_limited_as_resource_exhausted, + local_cluster_rate_limit, + rate_limits, + max_dynamic_descriptors, + } = value; + unsupported_field!( + // stat_prefix, + // status, + // token_bucket, + filter_enabled, + filter_enforced, + request_headers_to_add_when_not_enforced, + response_headers_to_add, + descriptors, + stage, + local_rate_limit_per_downstream_connection, + enable_x_ratelimit_headers, + vh_rate_limits, + always_consume_default_token_bucket, + rate_limited_as_resource_exhausted, + local_cluster_rate_limit, + rate_limits, + max_dynamic_descriptors + )?; + if stat_prefix.is_used() { + tracing::warn!("stat_prefix used in local_rate_limit, this field will be ignored."); + } + //note(hayley): envoy sets status codes <400 to 429 here. + // we might want to do some validation too + let status = status + .map(http_status_from_envoy) + .transpose() + .with_node("status")? 
+ .unwrap_or(StatusCode::TOO_MANY_REQUESTS); + let EnvoyTokenBucket { max_tokens, tokens_per_fill, fill_interval } = required!(token_bucket)?; + let max_tokens = required!(max_tokens).with_node("token_bucket")?; + let tokens_per_fill = tokens_per_fill.map(|t| t.value).unwrap_or(1); + if tokens_per_fill == 0 { + return Err(GenericError::from_msg("tokens per fill can't be zero") + .with_node("tokens_per_fill") + .with_node("token_bucket")); + } + let fill_interval = + duration_from_envoy(required!(fill_interval)?).with_node("fill_interval").with_node("token_bucket")?; + Ok(Self { status, max_tokens, tokens_per_fill, fill_interval }) + } + } +} diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/router.rs b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/router.rs new file mode 100644 index 00000000..c85d0813 --- /dev/null +++ b/orion-configuration/src/config/network_filters/http_connection_manager/http_filters/router.rs @@ -0,0 +1,59 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +pub struct Router; + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::Router; + use crate::config::common::*; + use orion_data_plane_api::envoy_data_plane_api::envoy::extensions::filters::http::router::v3::Router as EnvoyRouter; + impl TryFrom for Router { + type Error = GenericError; + fn try_from(value: EnvoyRouter) -> Result { + let EnvoyRouter { + dynamic_stats, + start_child_span, + upstream_log, + upstream_log_options, + suppress_envoy_headers, + strict_check_headers, + respect_expected_rq_timeout, + suppress_grpc_request_failure_code_stats, + upstream_http_filters, + } = value; + unsupported_field!( + //note: docs say this field defaults to true. So depending on our behaviour we might have to check this is instead set to + // Some(false) + dynamic_stats, + start_child_span, + upstream_log, + upstream_log_options, + suppress_envoy_headers, + strict_check_headers, + respect_expected_rq_timeout, + suppress_grpc_request_failure_code_stats, + upstream_http_filters + )?; + Ok(Self) + } + } +} diff --git a/orion-configuration/src/config/network_filters/http_connection_manager/route.rs b/orion-configuration/src/config/network_filters/http_connection_manager/route.rs new file mode 100644 index 00000000..b441d235 --- /dev/null +++ b/orion-configuration/src/config/network_filters/http_connection_manager/route.rs @@ -0,0 +1,836 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use super::{header_matcher::HeaderMatcher, RetryPolicy}; +use crate::config::{ + cluster::ClusterSpecifier, + common::*, + core::{CaseSensitive, DataSource}, +}; +use bytes::Bytes; +use compact_str::CompactString; +use http::{ + uri::{Authority, InvalidUri, PathAndQuery, Scheme}, + HeaderName, Request, StatusCode, +}; +use regex::Regex; +use serde::{de::Error, Deserialize, Serialize}; +use std::{ + borrow::Cow, + hash::{Hash, Hasher}, + net::SocketAddr, + num::NonZeroU16, + ops::Range, + str::FromStr, + time::Duration, +}; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum Action { + Route(RouteAction), + DirectResponse(DirectResponseAction), + Redirect(RedirectAction), +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum AuthorityRedirect { + /// only redirect the port, leave the host as-is + PortRedirect(NonZeroU16), + /// only redirect the host, leave the port as-is + HostRedirect(#[serde(with = "http_serde_ext::authority")] Authority), + /// Redirect the whole authority (host:port) + AuthorityRedirect(#[serde(with = "http_serde_ext::authority")] Authority), +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename = "regex")] +pub struct RegexMatchAndSubstitute { + #[serde(with = "serde_regex")] + pub pattern: Regex, + pub substitution: CompactString, +} + +impl PartialEq for RegexMatchAndSubstitute { + fn eq(&self, other: &Self) -> bool { + self.substitution == other.substitution && self.pattern.as_str() == other.pattern.as_str() + } +} + +impl Eq for RegexMatchAndSubstitute {} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum PathRewriteSpecifier { + Path(#[serde(with = "http_serde_ext::path_and_query")] PathAndQuery), + Prefix(CompactString), + Regex(RegexMatchAndSubstitute), +} + +impl PathRewriteSpecifier { + /// will preserve the query part of the input if the replacement does not contain one + #[must_use] + pub fn apply( + &self, + path_and_query: Option<&PathAndQuery>, + route_match_result: &RouteMatchResult, + ) -> Result, InvalidUri> { + let old_path = path_and_query.map(PathAndQuery::path).unwrap_or_default(); + let old_query = path_and_query.map(PathAndQuery::query).unwrap_or_default(); + let new_path = match self { + //full overwrite, doesn't care what original was + PathRewriteSpecifier::Path(p) => p.path().into(), + // apply a regex tot the original + PathRewriteSpecifier::Regex(regex) => { + //we need to run the regex even if the original is empty because it could match against '^$' (^ = start-of-string, $ = end-of-string) + let replacement = regex.pattern.replace_all(old_path, regex.substitution.as_str()); + if let Cow::Borrowed(_) = replacement { + return Ok(None); + } else { + replacement + } + }, + PathRewriteSpecifier::Prefix(prefix) => { + if let Some(matched_range) = route_match_result.matched_range() { + let orig_without_prefix = &old_path[matched_range.end..]; + format!("{prefix}{orig_without_prefix}").into() + } else { + return 
+                    Ok(None);
+                }
+            },
+        };
+        if let Some(old_query) = old_query {
+            if !new_path.contains('?') {
+                return Some(PathAndQuery::from_str(&format!("{new_path}?{old_query}"))).transpose();
+            }
+        }
+        Some(PathAndQuery::from_str(&new_path)).transpose()
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct RedirectAction {
+    pub response_code: RedirectResponseCode,
+    pub strip_query: bool,
+    #[serde(flatten)]
+    pub authority_redirect: Option<AuthorityRedirect>,
+    #[serde(with = "http_serde_ext::scheme::option")]
+    pub scheme_rewrite_specifier: Option<Scheme>,
+    #[serde(flatten)]
+    pub path_rewrite_specifier: Option<PathRewriteSpecifier>,
+}
+
+#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum RedirectResponseCode {
+    Found,
+    MovedPermanently,
+    PermanentRedirect,
+    SeeOther, //🌊🦦
+    TemporaryRedirect,
+}
+
+impl From<RedirectResponseCode> for StatusCode {
+    fn from(value: RedirectResponseCode) -> Self {
+        match value {
+            RedirectResponseCode::Found => StatusCode::FOUND,
+            RedirectResponseCode::MovedPermanently => StatusCode::MOVED_PERMANENTLY,
+            RedirectResponseCode::PermanentRedirect => StatusCode::PERMANENT_REDIRECT,
+            RedirectResponseCode::SeeOther => StatusCode::SEE_OTHER,
+            RedirectResponseCode::TemporaryRedirect => StatusCode::TEMPORARY_REDIRECT,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct DirectResponseAction {
+    #[serde(with = "http_serde_ext::status_code")]
+    pub status: StatusCode,
+    #[serde(flatten)]
+    pub body: Option<DirectResponseBody>,
+}
+
+impl DirectResponseAction {
+    pub fn body(&self) -> &[u8] {
+        self.body.as_ref().map(DirectResponseBody::data).unwrap_or_default()
+    }
+}
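+// Behaviour sketch for `PathRewriteSpecifier::apply` above: the original query
+// string survives a prefix rewrite unless the substituted path carries its own.
+#[cfg(test)]
+mod rewrite_query_sketch {
+    use super::*;
+    use std::str::FromStr;
+
+    #[test]
+    fn prefix_rewrite_keeps_query() {
+        let uri = PathAndQuery::from_str("/test/123?a=1").unwrap();
+        // a path match over "/test" (bytes 0..5) gets rewritten to "/hello"
+        let result = PathRewriteSpecifier::Prefix("/hello".into())
+            .apply(
+                Some(&uri),
+                &RouteMatchResult { path_match: Some(PathMatcherResult { inner: Some(5) }), headers_matched: true },
+            )
+            .unwrap();
+        assert_eq!(result, Some(PathAndQuery::from_str("/hello/123?a=1").unwrap()));
+    }
+}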
+//hayley:
+// we immediately load the value of the DataSource and cache it here, allowing us to
+// use this type directly in orion-lib without having to worry about reading or caching the result in
+// the datapath. A file being loaded once at startup and not reloaded when the file changes matches Envoy;
+// perhaps we want to reconsider this behaviour in the future.
+// we also might want to use this representation for all DataSources, for consistency?
+#[derive(Debug, Clone, Serialize)]
+pub struct DirectResponseBody {
+    #[serde(flatten)]
+    source: DataSource,
+    #[serde(skip, default)]
+    pub data: Bytes,
+}
+
+impl<'de> Deserialize<'de> for DirectResponseBody {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        pub struct Inner {
+            #[serde(flatten)]
+            source: DataSource,
+        }
+        let source = Inner::deserialize(deserializer)?.source;
+        let data =
+            source.to_bytes_blocking().map_err(|e| D::Error::custom(format!("failed to read datasource: {e}")))?.into();
+        Ok(Self { source, data })
+    }
+}
+
+impl DirectResponseBody {
+    pub fn data(&self) -> &[u8] {
+        &self.data
+    }
+}
+
+impl PartialEq for DirectResponseBody {
+    fn eq(&self, other: &Self) -> bool {
+        self.source.eq(&other.source)
+    }
+}
+impl Eq for DirectResponseBody {}
+
+//todo: impl Serialize, Deserialize on DirectResponseBody to prepare the bytes at deserialization
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct RouteAction {
+    pub cluster_specifier: ClusterSpecifier,
+    #[serde(
+        with = "http_serde_ext::status_code",
+        skip_serializing_if = "is_default_statuscode",
+        default = "default_statuscode_deser"
+    )]
+    pub cluster_not_found_response_code: StatusCode,
+    #[serde(with = "humantime_serde")]
+    #[serde(skip_serializing_if = "is_default_timeout", default = "default_timeout_deser")]
+    pub timeout: Option<Duration>,
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    pub rewrite: Option<PathRewriteSpecifier>,
+    #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")]
+    //note(hayley): we wrap this struct in an Arc because orion-lib is designed around that.
+    // ideally we would check if we could instead use a reference in orion-lib, but that's a large refactor
+    pub retry_policy: Option<RetryPolicy>,
+    #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")]
+    pub hash_policy: Vec<HashPolicy>,
+}
+
+const DEFAULT_CLUSTER_NOT_FOUND_STATUSCODE: StatusCode = StatusCode::SERVICE_UNAVAILABLE;
+const fn default_statuscode_deser() -> StatusCode {
+    DEFAULT_CLUSTER_NOT_FOUND_STATUSCODE
+}
+fn is_default_statuscode(code: &StatusCode) -> bool {
+    *code == DEFAULT_CLUSTER_NOT_FOUND_STATUSCODE
+}
+
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(15);
+#[allow(clippy::unnecessary_wraps)]
+const fn default_timeout_deser() -> Option<Duration> {
+    Some(DEFAULT_TIMEOUT)
+}
+fn is_default_timeout(timeout: &Option<Duration>) -> bool {
+    *timeout == default_timeout_deser()
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
+pub struct HashPolicy {
+    pub policy_specifier: PolicySpecifier,
+    #[serde(skip_serializing_if = "std::ops::Not::not", default = "Default::default")]
+    pub terminal: bool,
+}
+
+#[derive(Clone, Debug, Copy)]
+pub enum HashPolicyResult {
+    Applied,
+    Skipped,
+    Terminal,
+}
+
+impl HashPolicy {
+    pub fn apply<B>(&self, hasher: &mut impl Hasher, req: &Request<B>, src_addr: SocketAddr) -> HashPolicyResult {
+        let applied = match &self.policy_specifier {
+            PolicySpecifier::SourceIp(true) => {
+                src_addr.hash(hasher);
+                true
+            },
+            PolicySpecifier::SourceIp(false) => false,
+            PolicySpecifier::Header(name) => req.headers().get(name).inspect(|value| value.hash(hasher)).is_some(),
+            PolicySpecifier::QueryParameter(name) => req
+                .uri()
+                .query()
+                .and_then(|query| {
+                    // Hash the value of the first query key that matches (case-sensitive)
+                    //note(hayley): we might slightly improve performance here by urlencoding the name parameter
+                    // instead of decoding the query
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum PolicySpecifier { + SourceIp(bool), + Header(#[serde(with = "http_serde_ext::header_name")] HeaderName), + QueryParameter(CompactString), +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct RouteMatch { + // todo(hayley): can't be none? + #[serde(flatten)] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub path_matcher: Option<PathMatcher>, + #[serde(skip_serializing_if = "Vec::is_empty", default = "Default::default")] + pub headers: Vec<HeaderMatcher>, +} + +impl Default for RouteMatch { + fn default() -> Self { + Self { + path_matcher: Some(PathMatcher { specifier: PathSpecifier::Prefix("".into()), ignore_case: false }), + headers: Vec::new(), + } + } +} + +pub struct RouteMatchResult { + path_match: Option<PathMatcherResult>, + headers_matched: bool, +} + +impl RouteMatchResult { + pub fn matched(&self) -> bool { + self.headers_matched && self.path_match.as_ref().map(PathMatcherResult::matched).unwrap_or(true) + } + + pub fn matched_range(&self) -> Option<Range<usize>> { + if self.headers_matched { + if let Some(pmr) = &self.path_match { + return pmr.matched_range(); + } + } + None + } +} + +impl RouteMatch { + pub fn match_request(&self, request: &Request) -> RouteMatchResult { + let path_match = self.path_matcher.as_ref().map(|path_matcher| { + //todo(hayley): how do we treat empty paths here? + path_matcher.matches(request.uri().path_and_query().unwrap_or(&PathAndQuery::from_static(""))) + }); + //short circuit if path match fails + let headers_matched = if path_match.is_some() { + self.headers.iter().all(|matcher| matcher.request_matches(request)) + } else { + false + }; + RouteMatchResult { path_match, headers_matched } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct PathMatcher { + #[serde(flatten)] + specifier: PathSpecifier, + #[serde(skip_serializing_if = "std::ops::Not::not", default = "Default::default")] + ignore_case: bool, +} + +pub struct PathMatcherResult { + inner: Option<usize>, +} + +impl PathMatcherResult { + pub fn matched(&self) -> bool { + self.inner.is_some() + } + + pub fn matched_range(&self) -> Option<Range<usize>> { + self.inner.map(|up_to| 0..up_to) + } +} + +impl PathMatcher { + pub fn matches(&self, path: &PathAndQuery) -> PathMatcherResult { + let path = path.path(); + let case_matcher = CaseSensitive(!self.ignore_case, path); + let inner = match &self.specifier { + PathSpecifier::Exact(s) => case_matcher.equals(s).then_some(s.len()), + PathSpecifier::Prefix(p) => case_matcher.starts_with(p).then_some(p.len()), + PathSpecifier::PathSeparatedPrefix(psp) => { + if case_matcher.equals(&psp[..psp.len() - 1]) { + Some(psp.len() - 1) + } else if case_matcher.starts_with(psp) { + Some(psp.len()) + } else { + None + } + }, + PathSpecifier::Regex(r) => r.matches_full(path).then_some(path.len()), + }; + PathMatcherResult { inner } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum PathSpecifier { + Prefix(CompactString), + Exact(CompactString), + Regex(#[serde(with = "serde_regex")] Regex), + PathSeparatedPrefix(CompactString), +}
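+// Hedged sketch: PathSeparatedPrefix is stored with a trailing slash, so
+// "/api/" matches "/api" exactly and any "/api/..." path, but not "/apiary".
+// This test is illustrative and assumes PathMatcher's private fields are
+// reachable from a child module, as with the tests module below.
+#[cfg(test)]
+mod path_matcher_sketch {
+    use super::*;
+
+    #[test]
+    fn path_separated_prefix_semantics() {
+        let matcher = PathMatcher {
+            specifier: PathSpecifier::PathSeparatedPrefix("/api/".into()),
+            ignore_case: false,
+        };
+        assert!(matcher.matches(&PathAndQuery::from_static("/api")).matched());
+        assert!(matcher.matches(&PathAndQuery::from_static("/api/v1")).matched());
+        assert!(!matcher.matches(&PathAndQuery::from_static("/apiary")).matched());
+    }
+}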
impl PartialEq for PathSpecifier { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Regex(r1), Self::Regex(r2)) => r1.as_str().eq(r2.as_str()), + (Self::Prefix(s1), Self::Prefix(s2)) + | (Self::Exact(s1), Self::Exact(s2)) + | (Self::PathSeparatedPrefix(s1), Self::PathSeparatedPrefix(s2)) => s1.eq(s2), + _ => false, + } + } +} + +impl Eq for PathSpecifier {} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn test_rewrite_uri_by_path_match_range() { + let uri = PathAndQuery::from_str("/test/123").unwrap(); + let expected = Some(PathAndQuery::from_str("/hello/123").unwrap()); + let result = PathRewriteSpecifier::Prefix("/hello".into()) + .apply( + Some(&uri), + &RouteMatchResult { path_match: Some(PathMatcherResult { inner: Some(5) }), headers_matched: true }, + ) + .unwrap(); + assert_eq!(result, expected); + } + + #[test] + fn test_rewrite_uri_by_regex() { + let uri = PathAndQuery::from_str("/test/123").unwrap(); + let expected = Some(PathAndQuery::from_str("/hello/123").unwrap()); + let result = PathRewriteSpecifier::Regex(RegexMatchAndSubstitute { + pattern: Regex::new("/test(.*)").unwrap(), + substitution: "/hello$1".into(), + }) + .apply( + Some(&uri), + &RouteMatchResult { path_match: Some(PathMatcherResult { inner: None }), headers_matched: true }, + ) + .unwrap(); + assert_eq!(result, expected); + } +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{ + Action, AuthorityRedirect, DirectResponseAction, DirectResponseBody, HashPolicy, PathMatcher, + PathRewriteSpecifier, PathSpecifier, PolicySpecifier, RedirectAction, RedirectResponseCode, + RegexMatchAndSubstitute, RouteAction, RouteMatch, DEFAULT_TIMEOUT, + }; + use crate::config::network_filters::http_connection_manager::RetryPolicy; + use crate::config::{ + common::*, + core::{regex_from_envoy, DataSource}, + util::{duration_from_envoy, http_status_from}, + }; + use http::{ + uri::{Authority, PathAndQuery, Scheme}, + HeaderName, + }; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::route::v3::{ + redirect_action::{ + PathRewriteSpecifier as EnvoyPathRewriteSpecifier, RedirectResponseCode as EnvoyRedirectResponseCode, + SchemeRewriteSpecifier as EnvoySchemeRewriteSpecifier, + }, + route::Action as EnvoyAction, + route_action::{ + hash_policy::{ + ConnectionProperties as EnvoyConnectionProperties, Header as EnvoyHeader, + PolicySpecifier as EnvoyPolicySpecifier, QueryParameter as EnvoyQueryParameter, + }, + HashPolicy as EnvoyHashPolicy, + }, + route_match::PathSpecifier as EnvoyPathSpecifier, + DirectResponseAction as EnvoyDirectResponseAction, RedirectAction as EnvoyRedirectAction, + RouteAction as EnvoyRouteAction, RouteMatch as EnvoyRouteMatch, + }, + r#type::matcher::v3::RegexMatchAndSubstitute as EnvoyRegexMatchAndSubstitute, + }; + use std::{num::NonZeroU16, str::FromStr}; + + impl TryFrom<EnvoyHashPolicy> for HashPolicy { + type Error = GenericError; + fn try_from(value: EnvoyHashPolicy) -> Result<Self, Self::Error> { + let EnvoyHashPolicy { terminal, policy_specifier } = value; + let policy_specifier = convert_opt!(policy_specifier)?; + Ok(Self { policy_specifier, terminal }) + } + }
impl TryFrom<EnvoyAction> for Action { + type Error = GenericError; + fn try_from(value: EnvoyAction) -> Result<Self, Self::Error> { + Ok(match value { + EnvoyAction::DirectResponse(dr) => Self::DirectResponse(dr.try_into()?), + EnvoyAction::Redirect(rd) => Self::Redirect(rd.try_into()?), + EnvoyAction::Route(r) => Self::Route(r.try_into()?), + EnvoyAction::FilterAction(_) => return Err(GenericError::unsupported_variant("FilterAction")), + EnvoyAction::NonForwardingAction(_) => { + return Err(GenericError::unsupported_variant("NonForwardingAction")) + }, + }) + } + } + + impl TryFrom<EnvoyRegexMatchAndSubstitute> for RegexMatchAndSubstitute { + type Error = GenericError; + fn try_from(value: EnvoyRegexMatchAndSubstitute) -> Result<Self, Self::Error> { + let EnvoyRegexMatchAndSubstitute { pattern, substitution } = value; + let pattern = regex_from_envoy(required!(pattern)?)?; + let substitution = substitution.into(); + Ok(Self { pattern, substitution }) + } + } + + impl From<EnvoyRedirectResponseCode> for RedirectResponseCode { + fn from(value: EnvoyRedirectResponseCode) -> Self { + match value { + EnvoyRedirectResponseCode::Found => Self::Found, + EnvoyRedirectResponseCode::MovedPermanently => Self::MovedPermanently, + EnvoyRedirectResponseCode::PermanentRedirect => Self::PermanentRedirect, + EnvoyRedirectResponseCode::SeeOther => Self::SeeOther, + EnvoyRedirectResponseCode::TemporaryRedirect => Self::TemporaryRedirect, + } + } + } + + impl TryFrom<EnvoyRedirectAction> for RedirectAction { + type Error = GenericError; + fn try_from(value: EnvoyRedirectAction) -> Result<Self, Self::Error> { + let EnvoyRedirectAction { + host_redirect, + port_redirect, + response_code, + strip_query, + scheme_rewrite_specifier, + path_rewrite_specifier, + } = value; + + let response_code = EnvoyRedirectResponseCode::from_i32(response_code) + .ok_or_else(|| { + GenericError::from_msg(format!("[unknown response_code {response_code}]")) + .with_node("response_code") + })? + .into(); + + let port_redirect = u16::try_from(port_redirect).map(NonZeroU16::new).map_err(|_| { + GenericError::from_msg(format!("{port_redirect} is not a valid port")).with_node("port_redirect") + })?; + let host_redirect = host_redirect.is_used().then_some(host_redirect); + let authority_redirect = match (host_redirect, port_redirect) { + (None, None) => None, + //can contain a port number in the host section too + (Some(host), None) => { + Some(AuthorityRedirect::HostRedirect(Authority::from_str(&host).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to parse {host} as authority"), e) + .with_node("host_redirect") + })?)) + }, + (None, Some(port)) => Some(AuthorityRedirect::PortRedirect(port)), + (Some(host), Some(port)) => { + let authority_string = format!("{host}:{port}"); + match Authority::from_str(&authority_string) { + Ok(authority) => Some(AuthorityRedirect::AuthorityRedirect(authority)), + Err(e) => { + // Envoy lets you set both host_redirect and port_redirect, + // but if you specify both it just appends them together, + // which can create an invalid authority. + return Err(GenericError::from_msg_with_cause( + format!("failed to parse {authority_string} as authority"), + e, + ) + .with_node("host_redirect")); + }, + } + }, + }; + let scheme_rewrite_specifier = scheme_rewrite_specifier + .and_then(|s| match s { + EnvoySchemeRewriteSpecifier::HttpsRedirect(true) => Some(Ok(Scheme::HTTPS)), + EnvoySchemeRewriteSpecifier::SchemeRedirect(s) if s.eq_ignore_ascii_case("https") => { + Some(Ok(Scheme::HTTPS)) + }, + + EnvoySchemeRewriteSpecifier::SchemeRedirect(s) if s.eq_ignore_ascii_case("http") => { + Some(Ok(Scheme::HTTP)) + }, + + EnvoySchemeRewriteSpecifier::SchemeRedirect(s) => { + Some(Scheme::from_str(&s.to_lowercase()).map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to parse {s} as scheme"), e) + })) + },
+ // HttpsRedirect(false) requests no scheme rewrite, so map it to None + EnvoySchemeRewriteSpecifier::HttpsRedirect(false) => None, + }) + .transpose() + .with_node("scheme_rewrite")?; + + let path_rewrite_specifier = path_rewrite_specifier + .map(|path_rewrite_specifier| match path_rewrite_specifier { + EnvoyPathRewriteSpecifier::PathRedirect(pr) => PathAndQuery::from_str(&pr) + .map_err(|e| { + GenericError::from_msg_with_cause(format!("failed to parse {pr} as a path and query"), e) + }) + .map(PathRewriteSpecifier::Path), + EnvoyPathRewriteSpecifier::PrefixRewrite(prefix) => Ok(PathRewriteSpecifier::Prefix(prefix.into())), + EnvoyPathRewriteSpecifier::RegexRewrite(regex) => regex.try_into().map(PathRewriteSpecifier::Regex), + }) + .transpose() + .with_node("path_rewrite_specifier")?; + Ok(Self { + response_code, + strip_query, + authority_redirect, + scheme_rewrite_specifier, + path_rewrite_specifier, + }) + } + } + impl TryFrom<EnvoyDirectResponseAction> for DirectResponseAction { + type Error = GenericError; + fn try_from(value: EnvoyDirectResponseAction) -> Result<Self, Self::Error> { + let EnvoyDirectResponseAction { status, body } = value; + let status = http_status_from(required!(status)?).with_node("status")?; + let body = if let Some(source) = body.map(DataSource::try_from).transpose().with_node("body")? { + let data = source + .to_bytes_blocking() + .map_err(|e| GenericError::from_msg_with_cause("failed to read datasource", e)) + .with_node("body")? + .into(); + Some(DirectResponseBody { source, data }) + } else { + None + }; + Ok(Self { status, body }) + } + } + + impl TryFrom<EnvoyRouteAction> for RouteAction { + type Error = GenericError; + fn try_from(value: EnvoyRouteAction) -> Result<Self, Self::Error> { + let EnvoyRouteAction { + cluster_not_found_response_code, + metadata_match, + prefix_rewrite, + regex_rewrite, + path_rewrite_policy, + append_x_forwarded_host, + timeout, + idle_timeout, + early_data_policy, + retry_policy, + retry_policy_typed_config, + request_mirror_policies, + priority, + rate_limits, + include_vh_rate_limits, + hash_policy, + cors, + max_grpc_timeout, + grpc_timeout_offset, + upgrade_configs, + internal_redirect_policy, + internal_redirect_action, + max_internal_redirects, + hedge_policy, + max_stream_duration, + cluster_specifier, + host_rewrite_specifier, + } = value; + unsupported_field!( + // cluster_not_found_response_code, + metadata_match, + // prefix_rewrite, + // regex_rewrite, + path_rewrite_policy, + append_x_forwarded_host, + // timeout, + idle_timeout, + early_data_policy, + // retry_policy, + retry_policy_typed_config, + request_mirror_policies, + priority, + rate_limits, + include_vh_rate_limits, + // hash_policy, + cors, + max_grpc_timeout, + grpc_timeout_offset, + upgrade_configs, + internal_redirect_policy, + internal_redirect_action, + max_internal_redirects, + hedge_policy, + max_stream_duration, + // cluster_specifier, + host_rewrite_specifier + )?; + let cluster_not_found_response_code = cluster_not_found_response_code + .is_used() + .then(|| http_status_from(cluster_not_found_response_code)) + .unwrap_or(Ok(super::DEFAULT_CLUSTER_NOT_FOUND_STATUSCODE)) + .with_node("cluster_not_found_response_code")?; + let timeout = timeout.map(duration_from_envoy).unwrap_or(Ok(DEFAULT_TIMEOUT)).with_node("timeout")?; + // in envoy, the default value for the timeout (if not set) is 15s, and setting it to zero disables it.
+ // in order to better match the rest of the code/rust, we map disabled to None and the default to Some(15s) + let timeout = if timeout.is_zero() { None } else { Some(timeout) }; + let cluster_specifier = convert_opt!(cluster_specifier)?; + let rewrite = match (prefix_rewrite.is_used().then_some(prefix_rewrite), regex_rewrite) { + (None, None) => None, + (Some(s), None) => Some(PathRewriteSpecifier::Prefix(s.into())), + (None, Some(regex)) => { + Some(regex.try_into().map(PathRewriteSpecifier::Regex).with_node("regex_rewrite")?) + }, + (Some(_), Some(_)) => { + return Err(GenericError::from_msg( + "only one of `prefix_rewrite` and `regex_rewrite` may be specified", + )) + }, + }; + let retry_policy = retry_policy.map(RetryPolicy::try_from).transpose().with_node("retry_policy")?; + let hash_policy = convert_vec!(hash_policy)?; + Ok(Self { cluster_not_found_response_code, timeout, cluster_specifier, rewrite, retry_policy, hash_policy }) + } + } + + impl TryFrom<EnvoyPolicySpecifier> for PolicySpecifier { + type Error = GenericError; + fn try_from(value: EnvoyPolicySpecifier) -> Result<Self, Self::Error> { + Ok(match value { + EnvoyPolicySpecifier::ConnectionProperties(EnvoyConnectionProperties { source_ip }) => { + Self::SourceIp(source_ip) + }, + EnvoyPolicySpecifier::Header(EnvoyHeader { header_name, regex_rewrite }) => { + unsupported_field!(regex_rewrite)?; + PolicySpecifier::Header(HeaderName::from_str(&header_name).map_err(|e| { + GenericError::from_msg_with_cause( + format!("Couldn't convert \"{header_name}\" to a header name"), + e, + ) + })?) + }, + EnvoyPolicySpecifier::QueryParameter(EnvoyQueryParameter { name }) => { + Self::QueryParameter(required!(name)?.into()) + }, + EnvoyPolicySpecifier::Cookie(_) => return Err(GenericError::unsupported_variant("Cookie")), + EnvoyPolicySpecifier::FilterState(_) => return Err(GenericError::unsupported_variant("FilterState")), + }) + } + } + + impl TryFrom<EnvoyRouteMatch> for RouteMatch { + type Error = GenericError; + fn try_from(value: EnvoyRouteMatch) -> Result<Self, Self::Error> { + let EnvoyRouteMatch { + case_sensitive, + runtime_fraction, + headers, + query_parameters, + grpc, + tls_context, + dynamic_metadata, + path_specifier, + filter_state, + } = value; + unsupported_field!( + // case_sensitive, + runtime_fraction, + // headers, + query_parameters, + grpc, + tls_context, + dynamic_metadata, + // path_specifier, + filter_state + )?; + let ignore_case = !case_sensitive.map(|v| v.value).unwrap_or(true); + let path_specifier = path_specifier.map(PathSpecifier::try_from).transpose().with_node("path_specifier")?; + let headers = convert_vec!(headers)?; + let path_matcher = path_specifier.map(|specifier| PathMatcher { specifier, ignore_case }); + Ok(Self { path_matcher, headers }) + } + } + + impl TryFrom<EnvoyPathSpecifier> for PathSpecifier { + type Error = GenericError; + fn try_from(value: EnvoyPathSpecifier) -> Result<Self, Self::Error> { + match value { + EnvoyPathSpecifier::Prefix(s) => Ok(Self::Prefix(s.into())), + EnvoyPathSpecifier::Path(s) => Ok(Self::Exact(s.into())), + EnvoyPathSpecifier::SafeRegex(r) => regex_from_envoy(r).map(Self::Regex), + EnvoyPathSpecifier::PathSeparatedPrefix(mut s) => { + if s.ends_with('/') || s.contains(['?', '#']) { + Err(GenericError::from_msg(format!( + "PathSeparatedPrefix \"{s}\" contains invalid characters ('?' or '#') or ends with '/'" + )))
+ } else { + s.push('/'); + Ok(Self::PathSeparatedPrefix(s.into())) + } + }, + EnvoyPathSpecifier::ConnectMatcher(_) => Err(GenericError::unsupported_variant("ConnectMatcher")), + EnvoyPathSpecifier::PathMatchPolicy(_) => Err(GenericError::unsupported_variant("PathMatchPolicy")), + } + } + } +} diff --git a/orion-configuration/src/config/network_filters/network_rbac.rs b/orion-configuration/src/config/network_filters/network_rbac.rs new file mode 100644 index 00000000..b7a30aed --- /dev/null +++ b/orion-configuration/src/config/network_filters/network_rbac.rs @@ -0,0 +1,321 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use crate::config::core::StringMatcher; +use ipnet::IpNet; +use serde::{Deserialize, Serialize}; +use std::net::SocketAddr; +use tracing::debug; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct NetworkRbac { + pub action: Action, + //fixme(hayley): replace vec with std::collections::BTreeMap + // and include the policy name as Envoy says to apply them + // in lexical order + pub policies: Vec<Policy>, +} + +#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum Action { + Allow, + Deny, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct Policy { + pub permissions: Vec<Permission>, + pub principals: Vec<Principal>, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum Permission { + Any, + DestinationIp(IpNet), + DestinationPort(u16), + DestinationPortRange(std::ops::Range<u16>), + ServerName(StringMatcher), +} + +#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum Principal { + Any, + DownstreamRemoteIp(IpNet), +} + +#[derive(Debug, Clone, Copy)] +pub struct NetworkContext<'a> { + destination: SocketAddr, + downstream: SocketAddr, + server_name: Option<&'a str>, +} + +impl<'a> NetworkContext<'a> { + pub fn new(destination: SocketAddr, downstream: SocketAddr, server_name: Option<&'a str>) -> Self { + Self { destination, downstream, server_name } + } +} + +impl Permission { + fn is_applicable(&self, ctx: NetworkContext) -> bool { + match self { + Self::Any => true, + Self::DestinationIp(ip) => ip.contains(&ctx.destination.ip()), + Self::DestinationPort(port) => ctx.destination.port() == *port, + Self::DestinationPortRange(range) => range.contains(&ctx.destination.port()), + Self::ServerName(matcher) => match ctx.server_name.map(|sn| matcher.matches(sn)) { + Some(true) => true, + Some(false) | None => false, + }, + } + } +} + +impl Principal { + fn has_principal(&self, ctx: NetworkContext) -> bool { + match self { + Principal::Any => true, + Principal::DownstreamRemoteIp(ip) => ip.contains(&ctx.downstream.ip()), + } + } +}
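+// Hedged sketch (illustration only): DestinationPortRange uses a half-open
+// std::ops::Range, so the upper bound itself does not match. is_applicable is
+// private but reachable from a child module, like the tests module below.
+#[cfg(test)]
+mod port_range_sketch {
+    use super::*;
+
+    #[test]
+    fn port_range_is_half_open() {
+        let perm = Permission::DestinationPortRange(8000..8080);
+        let hit = NetworkContext::new("10.0.0.1:8000".parse().unwrap(), "10.0.0.2:40000".parse().unwrap(), None);
+        let miss = NetworkContext::new("10.0.0.1:8080".parse().unwrap(), "10.0.0.2:40000".parse().unwrap(), None);
+        assert!(perm.is_applicable(hit));
+        assert!(!perm.is_applicable(miss));
+    }
+}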
+impl Policy { + fn enforce(&self, ctx: NetworkContext) -> bool { + let has_permission = self.permissions.iter().any(|p| p.is_applicable(ctx)); + let has_principal = self.principals.iter().any(|p| p.has_principal(ctx)); + debug!("Enforcing policy permissions {has_permission} principals {has_principal}"); + has_permission && has_principal + } +} + +impl NetworkRbac { + pub fn is_permitted(&self, ctx: NetworkContext) -> bool { + let is_enforced = self.policies.iter().any(|p| p.enforce(ctx)); + debug!("Rule is enforced {is_enforced}"); + match self.action { + Action::Allow => is_enforced, + Action::Deny => !is_enforced, + } + } +} + +#[cfg(test)] +mod tests { + + use super::{Action, NetworkContext, NetworkRbac, Permission, Policy, Principal}; + + fn create_network_context<'a>( + destination_ip: &str, + destination_port: u16, + downstream_ip: &str, + downstream_port: u16, + server_name: Option<&'a str>, + ) -> NetworkContext<'a> { + NetworkContext { + destination: format!("{destination_ip}:{destination_port}").parse().unwrap(), + downstream: format!("{downstream_ip}:{downstream_port}").parse().unwrap(), + server_name, + } + } + + #[test] + fn rule_test_allow_any() { + let permission = Permission::Any; + let principal = Principal::Any; + let policy = Policy { permissions: vec![permission], principals: vec![principal] }; + let rbac_rule = NetworkRbac { action: Action::Allow, policies: vec![policy] }; + assert!(rbac_rule.is_permitted(create_network_context("127.0.0.1", 8000, "127.0.0.1", 9000, None))); + } + #[test] + fn rule_test_allow_dest_ip_permission_any_principal() { + let permission = Permission::DestinationIp("127.0.0.0/24".parse().unwrap()); + let principal = Principal::Any; + let policy = Policy { permissions: vec![permission], principals: vec![principal] }; + let rbac_rule = NetworkRbac { action: Action::Allow, policies: vec![policy] }; + assert!(rbac_rule.is_permitted(create_network_context("127.0.0.1", 8000, "127.0.0.1", 9000, None))); + } + + #[test] + fn rule_test_allow_dest_ip_host_and_any_permission_any_principal() { + let permission2 = Permission::DestinationIp("127.0.0.0/24".parse().unwrap()); + let permission1 = Permission::Any; + let principal = Principal::Any; + let policy = Policy { permissions: vec![permission1, permission2], principals: vec![principal] }; + let rbac_rule = NetworkRbac { action: Action::Allow, policies: vec![policy] }; + assert!(rbac_rule.is_permitted(create_network_context("127.0.0.1", 8000, "127.0.0.1", 9000, None))); + } + + #[test] + fn rule_test_allow_dest_ip_and_any_permission_any_principal_negative() { + let permission2 = Permission::DestinationIp("192.168.1.0/24".parse().unwrap()); + let permission1 = Permission::Any; + let principal = Principal::Any; + let policy = Policy { permissions: vec![permission1, permission2], principals: vec![principal] }; + let rbac_rule = NetworkRbac { action: Action::Allow, policies: vec![policy] }; + assert!(rbac_rule.is_permitted(create_network_context("127.0.0.1", 8000, "127.0.0.1", 9000, None))); + } + + #[test] + fn rule_test_allow_dest_ip_permission_src_ip_principal() { + let permission = Permission::DestinationIp("127.0.0.0/24".parse().unwrap()); + let principal = Principal::DownstreamRemoteIp("127.0.0.0/24".parse().unwrap()); + let policy = Policy { permissions: vec![permission], principals: vec![principal] }; + let rbac_rule = NetworkRbac { action: Action::Allow, policies: vec![policy] }; + assert!(rbac_rule.is_permitted(create_network_context("127.0.0.1", 8000, "127.0.0.1", 9000, None))); + } +} + 
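+// Hedged sketch complementing the tests above: Action::Deny inverts the
+// enforcement result, so a matching policy *blocks* the connection and a
+// non-matching one falls through to allow.
+#[cfg(test)]
+mod deny_action_sketch {
+    use super::*;
+
+    #[test]
+    fn deny_inverts_enforcement() {
+        let policy = Policy {
+            permissions: vec![Permission::DestinationPort(8000)],
+            principals: vec![Principal::Any],
+        };
+        let rbac = NetworkRbac { action: Action::Deny, policies: vec![policy] };
+        let blocked = NetworkContext::new("127.0.0.1:8000".parse().unwrap(), "127.0.0.1:9000".parse().unwrap(), None);
+        let allowed = NetworkContext::new("127.0.0.1:8001".parse().unwrap(), "127.0.0.1:9000".parse().unwrap(), None);
+        assert!(!rbac.is_permitted(blocked));
+        assert!(rbac.is_permitted(allowed));
+    }
+}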
+#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{Action, NetworkRbac, Permission, Policy, Principal}; + use crate::config::{common::*, core::CidrRange, util::u32_to_u16}; + use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::rbac::v3::{ + permission::Rule as EnvoyPermissionRule, principal::Identifier as EnvoyPrincipalIdentifier, + rbac::Action as EnvoyAction, Permission as EnvoyPermission, Policy as EnvoyPolicy, + Principal as EnvoyPrincipal, Rbac as EnvoyRbac, + }, + extensions::filters::network::rbac::v3::Rbac as EnvoyNetworkRbac, + r#type::v3::Int32Range, + }; + + impl TryFrom<EnvoyRbac> for NetworkRbac { + type Error = GenericError; + + fn try_from(envoy: EnvoyRbac) -> Result<Self, Self::Error> { + let EnvoyRbac { action, policies, audit_logging_options } = envoy; + unsupported_field!(audit_logging_options)?; + let action = Action::try_from(action).with_node("action")?; + let policies = required!(policies)?.into_values().map(Policy::try_from).collect::<Result<_, _>>()?; + Ok(NetworkRbac { action, policies }) + } + } + impl TryFrom<EnvoyNetworkRbac> for NetworkRbac { + type Error = GenericError; + fn try_from(envoy: EnvoyNetworkRbac) -> Result<Self, Self::Error> { + let EnvoyNetworkRbac { + rules, + matcher, + shadow_rules, + shadow_matcher, + shadow_rules_stat_prefix, + stat_prefix, + enforcement_type, + delay_deny, + } = envoy; + unsupported_field!( + // rules, + matcher, + shadow_rules, + shadow_matcher, + shadow_rules_stat_prefix, + // stat_prefix, + enforcement_type, + delay_deny + )?; + if stat_prefix.is_used() { + tracing::warn!( + "unsupported field stat_prefix used in network rbac filter. This field will be ignored." + ); + } + convert_opt!(rules) + } + } + + impl TryFrom<i32> for Action { + type Error = GenericError; + fn try_from(value: i32) -> Result<Self, Self::Error> { + EnvoyAction::try_from(value) + .map_err(|_| GenericError::unsupported_variant(format!("[unknown action {value}]")))? + .try_into() + } + } + + impl TryFrom<EnvoyAction> for Action { + type Error = GenericError; + fn try_from(value: EnvoyAction) -> Result<Self, Self::Error> { + Ok(match value { + EnvoyAction::Allow => Self::Allow, + EnvoyAction::Deny => Self::Deny, + EnvoyAction::Log => return Err(GenericError::unsupported_variant("Log")), + }) + } + } + impl TryFrom<EnvoyPolicy> for Policy { + type Error = GenericError; + fn try_from(envoy: EnvoyPolicy) -> Result<Self, Self::Error> { + let EnvoyPolicy { permissions, principals, condition, checked_condition } = envoy; + unsupported_field!(condition, checked_condition)?; + let permissions = convert_non_empty_vec!(permissions)?; + let principals = convert_non_empty_vec!(principals)?; + Ok(Self { permissions, principals }) + } + } + + impl TryFrom<EnvoyPermission> for Permission { + type Error = GenericError; + fn try_from(envoy: EnvoyPermission) -> Result<Self, Self::Error> { + let EnvoyPermission { rule } = envoy; + match required!(rule)?
{ + EnvoyPermissionRule::Any(true) => Ok(Self::Any), + EnvoyPermissionRule::Any(false) => Err(GenericError::from_msg("Any has to be true")), + EnvoyPermissionRule::DestinationIp(destination_ip) => { + CidrRange::try_from(destination_ip).map(CidrRange::into_ipnet).map(Self::DestinationIp) + }, + EnvoyPermissionRule::DestinationPort(port) => u32_to_u16(port).map(Self::DestinationPort), + EnvoyPermissionRule::DestinationPortRange(Int32Range { start, end }) => { + match (start.try_into(), end.try_into()) { + (Ok(start), Ok(end)) if start < end => Ok(Self::DestinationPortRange(start..end)), + (Ok(_), Ok(_)) => { + Err(GenericError::from_msg(format!("lower port range {start} is >= upper range {end}"))) + }, + (Err(_), _) | (_, Err(_)) => { + Err(GenericError::from_msg(format!("Invalid port range {start}..{end}"))) + }, + } + }, + EnvoyPermissionRule::RequestedServerName(matcher) => matcher.try_into().map(Self::ServerName), + _ => Err(GenericError::unsupported_variant("[Unsupported Permission Rule]")), + } + .with_node("rule") + } + } + + impl TryFrom<EnvoyPrincipal> for Principal { + type Error = GenericError; + fn try_from(value: EnvoyPrincipal) -> Result<Self, Self::Error> { + let EnvoyPrincipal { identifier } = value; + match required!(identifier)? { + EnvoyPrincipalIdentifier::Any(true) => Ok(Self::Any), + EnvoyPrincipalIdentifier::Any(false) => Err(GenericError::from_msg("Any has to be true")), + EnvoyPrincipalIdentifier::DirectRemoteIp(cidr) => { + CidrRange::try_from(cidr).map(CidrRange::into_ipnet).map(Self::DownstreamRemoteIp) + }, + _ => Err(GenericError::unsupported_variant("[Unsupported Principal Identifier]")), + } + .with_node("identifier") + } + } +} diff --git a/orion-configuration/src/config/network_filters/tcp_proxy.rs b/orion-configuration/src/config/network_filters/tcp_proxy.rs new file mode 100644 index 00000000..7b456bd3 --- /dev/null +++ b/orion-configuration/src/config/network_filters/tcp_proxy.rs @@ -0,0 +1,84 @@ +#![allow(deprecated)] +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// +// + +use crate::config::cluster::ClusterSpecifier; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct TcpProxy { + pub cluster_specifier: ClusterSpecifier, +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::TcpProxy; + use crate::config::common::*; + use orion_data_plane_api::envoy_data_plane_api::envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy as EnvoyTcpProxy; + + impl TryFrom<EnvoyTcpProxy> for TcpProxy { + type Error = GenericError; + fn try_from(value: EnvoyTcpProxy) -> Result<Self, Self::Error> { + let EnvoyTcpProxy { + stat_prefix, + on_demand, + metadata_match, + idle_timeout, + downstream_idle_timeout, + upstream_idle_timeout, + access_log, + max_connect_attempts, + hash_policy, + tunneling_config, + max_downstream_connection_duration, + access_log_flush_interval, + flush_access_log_on_connected, + access_log_options, + cluster_specifier, + proxy_protocol_tlvs, + backoff_options, + } = value; + unsupported_field!( + // stat_prefix, + on_demand, + metadata_match, + idle_timeout, + downstream_idle_timeout, + upstream_idle_timeout, + access_log, + max_connect_attempts, + hash_policy, + tunneling_config, + max_downstream_connection_duration, + access_log_flush_interval, + flush_access_log_on_connected, + access_log_options, + // cluster_specifier, + backoff_options, + proxy_protocol_tlvs + )?; + if stat_prefix.is_used() { + tracing::warn!("unsupported field stat_prefix used in tcp_proxy. This field will be ignored."); + } + let cluster_specifier = convert_opt!(cluster_specifier)?; + Ok(Self { cluster_specifier }) + } + } +} diff --git a/orion-configuration/src/config/runtime.rs b/orion-configuration/src/config/runtime.rs new file mode 100644 index 00000000..379163ef --- /dev/null +++ b/orion-configuration/src/config/runtime.rs @@ -0,0 +1,151 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use serde::{Deserialize, Serialize}; +use std::{ + env::var, + fmt::Display, + num::{NonZeroU32, NonZeroUsize}, + ops::Deref, +}; + +use crate::options::Options; + +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)] +pub struct Runtime { + #[serde(default = "non_zero_num_cpus")] + pub num_cpus: NonZeroUsize, + #[serde(default = "one")] + pub num_runtimes: NonZeroU32, + #[serde(skip_serializing_if = "Option::is_none", default)] + pub global_queue_interval: Option<NonZeroU32>, + #[serde(skip_serializing_if = "Option::is_none", default)] + pub affinity_strategy: Option<Affinity>, + //may be zero?
+ #[serde(skip_serializing_if = "Option::is_none", default)] + pub event_interval: Option<NonZeroU32>, + #[serde(skip_serializing_if = "Option::is_none", default)] + pub max_io_events_per_tick: Option<NonZeroUsize>, +} + +fn one() -> NonZeroU32 { + NonZeroU32::MIN +} + +impl Runtime { + #[must_use] + pub fn update_from_env_and_options(self, opt: &Options) -> Self { + Runtime { + num_cpus: var("ORION_GATEWAY_CORES") + .ok() + .and_then(|v| v.parse::<NonZeroUsize>().ok()) + .or(opt.num_cpus) + .unwrap_or(self.num_cpus), + + num_runtimes: var("ORION_GATEWAY_RUNTIMES") + .ok() + .and_then(|v| v.parse::<NonZeroU32>().ok()) + .or(opt.num_runtimes) + .unwrap_or(self.num_runtimes), + + global_queue_interval: var("ORION_RT_GLOBAL_QUEUE_INTERVAL") + .ok() + .and_then(|v| v.parse::<NonZeroU32>().ok()) + .or(opt.global_queue_interval) + .or(self.global_queue_interval), + + event_interval: var("ORION_RT_EVENT_INTERVAL") + .ok() + .and_then(|v| v.parse::<NonZeroU32>().ok()) + .or(opt.event_interval) + .or(self.event_interval), + + max_io_events_per_tick: var("ORION_RT_MAX_IO_EVENT_PER_TICK") + .ok() + .and_then(|v| v.parse::<NonZeroUsize>().ok()) + .or(opt.max_io_events_per_tick) + .or(self.max_io_events_per_tick), + + affinity_strategy: self.affinity_strategy, + } + } + + //upcasts to usize to make it easier to do math with it and num_cpus + pub fn num_runtimes(&self) -> usize { + self.num_runtimes.get() as usize + } + pub fn num_cpus(&self) -> usize { + self.num_cpus.get() + } +} + +pub(crate) fn non_zero_num_cpus() -> NonZeroUsize { + NonZeroUsize::try_from(num_cpus::get()).expect("found zero cpus") +} + +impl Default for Runtime { + fn default() -> Self { + Self { + num_cpus: non_zero_num_cpus(), + num_runtimes: one(), + global_queue_interval: None, + event_interval: None, + max_io_events_per_tick: None, + affinity_strategy: None, + } + } +} + +#[repr(transparent)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Deserialize, Serialize)] +pub struct CoreId(usize); + +impl CoreId { + pub fn new(id: usize) -> CoreId { + CoreId(id) + } +} + +impl Display for CoreId { + #[inline] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Deref for CoreId { + type Target = usize; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[derive(Deserialize, Serialize, PartialEq, Eq, Debug, Clone)] +#[serde(tag = "type", content = "map")] +pub enum Affinity { + #[serde(rename = "auto")] + Auto, + #[serde(rename = "nodes")] + Nodes(Vec<Vec<CoreId>>), + #[serde(rename = "runtimes")] + Runtimes(Vec<Vec<CoreId>>), +}
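+// Hedged sketch of the precedence implemented by update_from_env_and_options:
+// environment variable first, then the CLI option, then the value already in
+// the config. The env var name is the real one used above; `opts` is assumed
+// to be an already-parsed Options.
+#[allow(dead_code)]
+fn effective_runtime(opts: &Options) -> Runtime {
+    // With ORION_GATEWAY_CORES=4 set, num_cpus becomes 4 regardless of
+    // opts.num_cpus or the file value.
+    std::env::set_var("ORION_GATEWAY_CORES", "4");
+    Runtime::default().update_from_env_and_options(opts)
+}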
diff --git a/orion-configuration/src/config/secret.rs b/orion-configuration/src/config/secret.rs new file mode 100644 index 00000000..3b284847 --- /dev/null +++ b/orion-configuration/src/config/secret.rs @@ -0,0 +1,193 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use crate::config::core::DataSource; +use compact_str::CompactString; +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct Secret { + name: CompactString, + #[serde(flatten)] + kind: Type, +} + +impl Secret { + pub fn name(&self) -> &str { + &self.name + } + + pub fn kind(&self) -> &Type { + &self.kind + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum Type { + TlsCertificate(TlsCertificate), + ValidationContext(ValidationContext), +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct ValidationContext { + trusted_ca: DataSource, +} + +impl ValidationContext { + pub fn trusted_ca(&self) -> &DataSource { + &self.trusted_ca + } +} + +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct TlsCertificate { + certificate_chain: DataSource, + private_key: DataSource, +} + +impl Debug for TlsCertificate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TlsCertificate") + .field("certificate_chain", &self.certificate_chain) + .field( + "private_key", + match self.private_key() { + DataSource::Path(p) => p, + DataSource::InlineBytes(_) | DataSource::InlineString(_) => &"[censored]", + DataSource::EnvironmentVariable(env) => env, + }, + ) + .finish() + } +} + +impl TlsCertificate { + pub fn certificate_chain(&self) -> &DataSource { + &self.certificate_chain + } + pub fn private_key(&self) -> &DataSource { + &self.private_key + } +} + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{Secret, TlsCertificate, Type, ValidationContext}; + use crate::config::common::*; + use compact_str::CompactString; + use orion_data_plane_api::envoy_data_plane_api::envoy::extensions::transport_sockets::tls::v3::{ + secret::Type as EnvoyType, CertificateValidationContext as EnvoyCertificateValidationContext, + Secret as EnvoySecret, TlsCertificate as EnvoyTlsCertificate, + }; + + impl TryFrom<EnvoySecret> for Secret { + type Error = GenericError; + fn try_from(envoy: EnvoySecret) -> Result<Self, Self::Error> { + let EnvoySecret { name, r#type } = envoy; + let name: CompactString = required!(name)?.into(); + let kind = convert_opt!(r#type, "type").with_name(name.clone())?; + Ok(Self { name, kind }) + } + } + impl TryFrom<EnvoyType> for Type { + type Error = GenericError; + fn try_from(value: EnvoyType) -> Result<Self, Self::Error> { + match value { + EnvoyType::TlsCertificate(envoy) => Ok(Self::TlsCertificate(envoy.try_into()?)), + EnvoyType::ValidationContext(envoy) => Ok(Self::ValidationContext(envoy.try_into()?)), + EnvoyType::GenericSecret(_) => Err(GenericError::unsupported_variant("GenericSecret")), + EnvoyType::SessionTicketKeys(_) => Err(GenericError::unsupported_variant("SessionTicketKeys")), + } + } + } + impl TryFrom<EnvoyTlsCertificate> for TlsCertificate { + type Error = GenericError; + fn try_from(envoy: EnvoyTlsCertificate) -> Result<Self, Self::Error> { + let EnvoyTlsCertificate { + certificate_chain, + private_key, + pkcs12, + watched_directory, + private_key_provider, + password, + ocsp_staple, + signed_certificate_timestamp, + } = envoy; + unsupported_field!( + // certificate_chain, + // private_key, + pkcs12, + watched_directory, + private_key_provider, + password, + ocsp_staple, + signed_certificate_timestamp + )?; + let certificate_chain = convert_opt!(certificate_chain)?; + let private_key = convert_opt!(private_key)?; + Ok(Self { certificate_chain, private_key }) + } + } + + impl TryFrom<EnvoyCertificateValidationContext> for ValidationContext {
type Error = GenericError; + fn try_from(envoy: EnvoyCertificateValidationContext) -> Result<Self, Self::Error> { + let EnvoyCertificateValidationContext { + trusted_ca, + ca_certificate_provider_instance, + watched_directory, + verify_certificate_spki, + verify_certificate_hash, + match_typed_subject_alt_names, + match_subject_alt_names, + require_signed_certificate_timestamp, + crl, + allow_expired_certificate, + trust_chain_verification, + custom_validator_config, + only_verify_leaf_cert_crl, + max_verify_depth, + system_root_certs, + } = envoy; + unsupported_field!( + // trusted_ca, + ca_certificate_provider_instance, + watched_directory, + verify_certificate_spki, + verify_certificate_hash, + match_typed_subject_alt_names, + match_subject_alt_names, + require_signed_certificate_timestamp, + crl, + allow_expired_certificate, + trust_chain_verification, + custom_validator_config, + only_verify_leaf_cert_crl, + max_verify_depth, + system_root_certs + )?; + let trusted_ca = convert_opt!(trusted_ca)?; + Ok(Self { trusted_ca }) + } + } +}
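+// Hedged sketch: the Debug impl above prints key *paths* but censors inline
+// key material. The DataSource variant payloads are assumed to be
+// `From<&str>`-constructible (true for String, CompactString, and PathBuf).
+#[cfg(test)]
+mod tls_certificate_debug_sketch {
+    use super::*;
+
+    #[test]
+    fn private_key_material_is_censored() {
+        let cert = TlsCertificate {
+            certificate_chain: DataSource::Path("certs/chain.pem".into()),
+            private_key: DataSource::InlineString("-----BEGIN PRIVATE KEY-----".into()),
+        };
+        let printed = format!("{cert:?}");
+        assert!(printed.contains("[censored]"));
+        assert!(!printed.contains("BEGIN PRIVATE KEY"));
+    }
+}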
diff --git a/orion-configuration/src/config/transport.rs b/orion-configuration/src/config/transport.rs new file mode 100644 index 00000000..fbd18e30 --- /dev/null +++ b/orion-configuration/src/config/transport.rs @@ -0,0 +1,483 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use super::secret::{TlsCertificate, ValidationContext}; +use crate::config::common::*; +use base64::Engine as _; +use compact_str::CompactString; +use serde::{ + de::{self, MapAccess, Visitor}, + ser::SerializeStruct, + Deserialize, Serialize, +}; +use std::{ + ffi::{CStr, CString}, + str::FromStr, +}; + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub struct BindDevice { + /// An interface name as defined by Linux SO_BINDTODEVICE + interface: CString, +} + +impl BindDevice { + pub fn interface(&self) -> &CStr { + &self.interface + } +} + +impl Serialize for BindDevice { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + let mut serializer = serializer.serialize_struct("bind_device", 1)?; + if let Ok(interface) = self.interface.to_str() { + // we might want to loosen this restriction to allow non-ascii alphanumeric, + // but we should always deny any char that has to be escaped to print in utf8 + if interface.chars().all(|c| c.is_ascii_alphanumeric()) { + serializer.serialize_field("interface", &interface)?; + return serializer.end(); + } + } + let iface_bytes = self.interface.to_bytes(); + let bytes = base64::engine::general_purpose::STANDARD.encode(iface_bytes); + serializer.serialize_field("interface_bytes", &bytes)?; + serializer.end() + } +} + +impl<'de> Deserialize<'de> for BindDevice { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(field_identifier, rename_all = "snake_case")] + enum Field { + Interface, + InterfaceBytes, + } + + struct StructVisitor; + + impl<'de> Visitor<'de> for StructVisitor { + type Value = BindDevice; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("struct BindDevice") + } + + fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error> + where + V: MapAccess<'de>, + { + let mut bytes = None; + while let Some(key) = map.next_key()? { + let string: String = map.next_value()?; + match key { + Field::Interface => { + if bytes.is_some() { + return Err(de::Error::duplicate_field("interface OR interface_bytes")); + } + bytes = Some(string.into_bytes()); + }, + Field::InterfaceBytes => { + if bytes.is_some() { + return Err(de::Error::duplicate_field("interface OR interface_bytes")); + } + bytes = Some(base64::engine::general_purpose::STANDARD.decode(&string).map_err(|e| { + de::Error::custom(format!("failed to decode interface_bytes as base64: {e}")) + })?); + }, + } + } + let bytes = bytes.ok_or_else(|| de::Error::missing_field("interface OR interface_bytes"))?; + + BindDevice::try_from(bytes) + .map_err(|e| de::Error::custom(format!("failed to parse bind_interface: {e}"))) + } + } + + const FIELDS: &[&str] = &["interface", "interface_bytes"]; + deserializer.deserialize_struct("BindDevice", FIELDS, StructVisitor) + } +} + +impl FromStr for BindDevice { + type Err = GenericError; + fn from_str(s: &str) -> Result<Self, Self::Err> { + s.as_bytes().to_vec().try_into() + } +}
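+// Hedged sketch: printable interface names round-trip through the `interface`
+// field; serde_yaml (already used by this crate's binary) is assumed to be
+// available as a dev-dependency here.
+#[cfg(test)]
+mod bind_device_serde_sketch {
+    use super::BindDevice;
+
+    #[test]
+    fn printable_names_round_trip() {
+        let dev: BindDevice = serde_yaml::from_str("interface: eth0").unwrap();
+        assert_eq!(dev.interface().to_bytes(), b"eth0");
+        let yaml = serde_yaml::to_string(&dev).unwrap();
+        assert!(yaml.contains("interface: eth0"));
+    }
+}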
+impl TryFrom<Vec<u8>> for BindDevice { + type Error = GenericError; + fn try_from(mut value: Vec<u8>) -> Result<Self, Self::Error> { + const IFNAMSIZE: usize = 16; + if value.last() != Some(&0u8) { + // Append NULL if missing + value.push(0); + } + + let interface = std::ffi::CString::from_vec_with_nul(value) + .map_err(|e| GenericError::from_msg_with_cause("failed to convert interface to CString", e))?; + if interface.as_bytes_with_nul().len() > IFNAMSIZE { + Err(GenericError::from_msg(format!( + "invalid interface name {}. Maximum length ({IFNAMSIZE}) exceeded", + interface.to_string_lossy() + ))) + } else { + Ok(Self { interface }) + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct CommonTlsContext { + #[serde(skip_serializing_if = "is_default", default)] + pub parameters: TlsParameters, + #[serde(flatten)] + pub secrets: Secrets, + #[serde(flatten)] + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub validation_context: Option<CommonTlsValidationContext>, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct TlsParameters { + #[serde(skip_serializing_if = "is_default_min_tls_version", default = "default_min_tls_version")] + pub minimum_protocol_version: TlsVersion, + #[serde(skip_serializing_if = "Option::is_none", default = "Default::default")] + pub maximum_protocol_version: Option<TlsVersion>, +} + +fn default_min_tls_version() -> TlsVersion { + TlsVersion::TLSv1_2 +} + +fn is_default_min_tls_version(value: &TlsVersion) -> bool { + *value == default_min_tls_version() +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +pub enum TlsVersion { + TLSv1_2, + TLSv1_3, +} + +impl TlsParameters { + pub fn supported_version(&self) -> &'static [TlsVersion] { + match self.minimum_protocol_version { + // assume that minimum <= maximum + TlsVersion::TLSv1_3 => &[TlsVersion::TLSv1_3], + TlsVersion::TLSv1_2 => match self.maximum_protocol_version { + None | Some(TlsVersion::TLSv1_3) => &[TlsVersion::TLSv1_2, TlsVersion::TLSv1_3], + Some(TlsVersion::TLSv1_2) => &[TlsVersion::TLSv1_2], + }, + } + } +} + +impl Default for TlsParameters { + fn default() -> Self { + Self { maximum_protocol_version: None, minimum_protocol_version: TlsVersion::TLSv1_2 } + } +}
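+// Sketch of supported_version's behaviour (illustration only): an unset
+// maximum keeps TLS 1.3 available, and pinning both ends narrows the slice.
+#[cfg(test)]
+mod tls_params_sketch {
+    use super::*;
+
+    #[test]
+    fn version_ranges() {
+        let default = TlsParameters::default();
+        assert_eq!(default.supported_version(), &[TlsVersion::TLSv1_2, TlsVersion::TLSv1_3]);
+        let pinned = TlsParameters {
+            minimum_protocol_version: TlsVersion::TLSv1_3,
+            maximum_protocol_version: Some(TlsVersion::TLSv1_3),
+        };
+        assert_eq!(pinned.supported_version(), &[TlsVersion::TLSv1_3]);
+    }
+}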
+pub struct SdsConfig { + pub name: CompactString, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum Secrets { + #[serde(rename = "tls_certificates_sds")] + SdsConfig(Vec<CompactString>), + #[serde(rename = "tls_certificates")] + Certificates(Vec<TlsCertificate>), +} + +impl Secrets { + pub fn len(&self) -> usize { + match self { + Self::Certificates(v) => v.len(), + Self::SdsConfig(v) => v.len(), + } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl From<Vec<SdsConfig>> for Secrets { + fn from(value: Vec<SdsConfig>) -> Self { + Self::SdsConfig(value.into_iter().map(|x| x.name).collect()) + } +} + +impl From<Vec<TlsCertificate>> for Secrets { + fn from(value: Vec<TlsCertificate>) -> Self { + Self::Certificates(value) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum CommonTlsValidationContext { + #[serde(rename = "validation_context_sds")] + SdsConfig(CompactString), + ValidationContext(ValidationContext), +} + +#[cfg(feature = "envoy-conversions")] +pub(crate) use envoy_conversions::*; +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + #![allow(deprecated)] + use super::{ + BindDevice, CommonTlsContext, CommonTlsValidationContext, SdsConfig, Secrets, TlsCertificate, TlsParameters, + TlsVersion, + }; + use crate::config::common::*; + use compact_str::CompactString; + use orion_data_plane_api::envoy_data_plane_api::{ + envoy::{ + config::core::v3::{socket_option::Value as EnvoySocketOptionValue, SocketOption as EnvoySocketOption}, + extensions::transport_sockets::tls::v3::{ + common_tls_context::ValidationContextType as EnvoyValidationContextType, + tls_parameters::TlsProtocol as EnvoyTlsProtocol, CommonTlsContext as EnvoyCommonTlsContext, + DownstreamTlsContext as EnvoyDownstreamTlsContext, SdsSecretConfig as EnvoySdsSecretConfig, + TlsParameters as EnvoyTlsParameters, UpstreamTlsContext as EnvoyUpstreamTlsContext, + }, + }, + google::protobuf::Any, + prost::Message, + }; + + impl BindDevice { + const fn socket_option() -> (i64, i64) { + (1, 25) + } + } + + impl TryFrom<EnvoySocketOption> for BindDevice { + type Error = GenericError; + fn try_from(value: EnvoySocketOption) -> Result<Self, Self::Error> { + let EnvoySocketOption { description, level, name, state, value, r#type: _ } = value; + unsupported_field!(state)?; + // this field is + // > An optional name to give this socket option for debugging, etc. + // > Uniqueness is not required and no special meaning is assumed. + // so while we don't use it, there should be no harm in allowing it. + let _ = description; + if (level, name) == BindDevice::socket_option() { + // max interface name w/NULL (see net/if.h) + + match required!(value)? { + EnvoySocketOptionValue::BufValue(name) => name.try_into(), + EnvoySocketOptionValue::IntValue(_) => Err(GenericError::unsupported_variant("IntValue")), + } + .with_node("value") + } else { + Err(GenericError::from_msg(format!( + "unsupported level/name pair \"({level}, {name})\". Only BindDevice \"{:?}\" is supported.", + Self::socket_option() + ))) + } + } + } + + impl TryFrom<EnvoyTlsParameters> for TlsParameters { + type Error = GenericError; + fn try_from(value: EnvoyTlsParameters) -> Result<Self, Self::Error> { + let EnvoyTlsParameters { + tls_minimum_protocol_version, + tls_maximum_protocol_version, + cipher_suites, + ecdh_curves, + signature_algorithms, + compliance_policies, + } = value; + unsupported_field!( + // tls_minimum_protocol_version, + // tls_maximum_protocol_version, + cipher_suites, + ecdh_curves, + signature_algorithms, + compliance_policies + )?; + + let tls_minimum_protocol_version = EnvoyTlsProtocol::from_i32(tls_minimum_protocol_version) + .ok_or_else(|| { + GenericError::unsupported_variant(format!( + "[unknown tls protocol variant {tls_minimum_protocol_version}]" + )) + }) + .with_node("tls_minimum_protocol_version")?; + let minimum_protocol_version = match tls_minimum_protocol_version { + EnvoyTlsProtocol::TlsAuto | EnvoyTlsProtocol::TlSv12 => TlsVersion::TLSv1_2, + EnvoyTlsProtocol::TlSv13 => TlsVersion::TLSv1_3, + EnvoyTlsProtocol::TlSv10 | EnvoyTlsProtocol::TlSv11 => { + return Err(GenericError::from_msg("TLS 1.2 is the minimum supported version")) + .with_node("tls_minimum_protocol_version") + }, + }; + let tls_maximum_protocol_version = EnvoyTlsProtocol::from_i32(tls_maximum_protocol_version) + .ok_or_else(|| { + GenericError::unsupported_variant(format!( + "[unknown tls protocol variant {tls_maximum_protocol_version}]" + )) + }) + .with_node("tls_maximum_protocol_version")?; + let maximum_protocol_version = match tls_maximum_protocol_version { + // if auto just don't set a maximum, in case TLSv1_4 is ever added + EnvoyTlsProtocol::TlsAuto => None, + EnvoyTlsProtocol::TlSv13 => Some(TlsVersion::TLSv1_3), + EnvoyTlsProtocol::TlSv12 => Some(TlsVersion::TLSv1_2), + EnvoyTlsProtocol::TlSv10 | EnvoyTlsProtocol::TlSv11 => { + return Err(GenericError::from_msg("TLS 1.2 is the minimum supported version")) + .with_node("tls_maximum_protocol_version") + }, + }; + if matches!( + (minimum_protocol_version, maximum_protocol_version), + (TlsVersion::TLSv1_3, Some(TlsVersion::TLSv1_2)) + ) { + return Err(GenericError::from_msg("minimum TLS version is newer than maximum TLS version")); + } + Ok(Self { minimum_protocol_version, maximum_protocol_version }) + } + }
impl TryFrom<EnvoyCommonTlsContext> for CommonTlsContext { + type Error = GenericError; + fn try_from(value: EnvoyCommonTlsContext) -> Result<Self, Self::Error> { + let EnvoyCommonTlsContext { + tls_params, + tls_certificates, + tls_certificate_sds_secret_configs, + tls_certificate_provider_instance, + tls_certificate_certificate_provider, + tls_certificate_certificate_provider_instance, + alpn_protocols, + custom_handshaker, + key_log, + validation_context_type, + custom_tls_certificate_selector, + } = value; + unsupported_field!( + // tls_params, + // tls_certificates, + // tls_certificate_sds_secret_configs, + tls_certificate_provider_instance, + tls_certificate_certificate_provider, + tls_certificate_certificate_provider_instance, + alpn_protocols, + custom_handshaker, + key_log, + // validation_context_type, + custom_tls_certificate_selector + )?; + let parameters = tls_params.map(TlsParameters::try_from).transpose()?.unwrap_or_default(); + let certificates: Vec<TlsCertificate> = convert_vec!(tls_certificates)?; + let tls_certificate_sds_secret_configs: Vec<SdsConfig> = convert_vec!(tls_certificate_sds_secret_configs)?; + let secrets = match (tls_certificate_sds_secret_configs.len(), certificates.len()) { + (0, 0) => Secrets::Certificates(Vec::new()), + (_, 0) => Secrets::from(tls_certificate_sds_secret_configs), + (0, _) => Secrets::from(certificates), + (_, _) => { + return Err(GenericError::from_msg( + "Only one of tls_certificates OR tls_certificate_sds_secret_configs may be set", + )) + }, + }; + let validation_context = validation_context_type.map(CommonTlsValidationContext::try_from).transpose()?; + Ok(Self { parameters, secrets, validation_context }) + } + } + impl TryFrom<EnvoySdsSecretConfig> for SdsConfig { + type Error = GenericError; + fn try_from(value: EnvoySdsSecretConfig) -> Result<Self, Self::Error> { + let EnvoySdsSecretConfig { name, sds_config } = value; + let name: CompactString = required!(name)?.into(); + unsupported_field!(sds_config).with_name(name.clone())?; + Ok(Self { name }) + } + } + + impl TryFrom<EnvoyValidationContextType> for CommonTlsValidationContext { + type Error = GenericError; + fn try_from(value: EnvoyValidationContextType) -> Result<Self, Self::Error> { + match value { + EnvoyValidationContextType::ValidationContext(cert_validation_ctx) => { + cert_validation_ctx.try_into().map(Self::ValidationContext) + }, + EnvoyValidationContextType::ValidationContextSdsSecretConfig(x) => { + SdsConfig::try_from(x).map(|sds| Self::SdsConfig(sds.name)) + }, + EnvoyValidationContextType::CombinedValidationContext(_) => { + Err(GenericError::unsupported_variant("CombinedValidationContext")) + }, + EnvoyValidationContextType::ValidationContextCertificateProvider(_) => { + Err(GenericError::unsupported_variant("ValidationContextCertificateProvider")) + }, + EnvoyValidationContextType::ValidationContextCertificateProviderInstance(_) => { + Err(GenericError::unsupported_variant("ValidationContextCertificateProviderInstance")) + }, + } + } + } + + pub(crate) enum SupportedEnvoyTransportSocket { + DownstreamTlsContext(EnvoyDownstreamTlsContext), + UpstreamTlsContext(EnvoyUpstreamTlsContext), + } + + impl TryFrom<Any> for SupportedEnvoyTransportSocket { + type Error = GenericError; + fn try_from(typed_config: Any) -> Result<Self, Self::Error> { + match typed_config.type_url.as_str() { + "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" => { + EnvoyDownstreamTlsContext::decode(typed_config.value.as_slice()) + .map(SupportedEnvoyTransportSocket::DownstreamTlsContext) + .map_err(|e| { + GenericError::from_msg_with_cause( + format!("failed to parse protobuf for \"{}\"", typed_config.type_url), + e, + ) + }) + },
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" => { + EnvoyUpstreamTlsContext::decode(typed_config.value.as_slice()) + .map(SupportedEnvoyTransportSocket::UpstreamTlsContext) + .map_err(|e| { + GenericError::from_msg_with_cause( + format!("failed to parse protobuf for \"{}\"", typed_config.type_url), + e, + ) + }) + }, + s => Err(GenericError::unsupported_variant(s.to_owned())), + } + } + } +} diff --git a/orion-configuration/src/config/util.rs b/orion-configuration/src/config/util.rs new file mode 100644 index 00000000..87aaaec7 --- /dev/null +++ b/orion-configuration/src/config/util.rs @@ -0,0 +1,61 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +#[cfg(feature = "envoy-conversions")] +pub(crate) use envoy_conversions::*; + +#[cfg(feature = "envoy-conversions")] +mod envoy_conversions { + use crate::{config::common::GenericError, config::WithNodeOnResult}; + use http::StatusCode; + use orion_data_plane_api::envoy_data_plane_api::{ + envoy::r#type::v3::HttpStatus as EnvoyHttpStatus, + google::protobuf::{Duration as PbDuration, UInt32Value}, + }; + use std::{fmt::Display, time::Duration}; + + pub fn u32_to_u16(value: u32) -> Result { + u16::try_from(value).map_err(|_| GenericError::from_msg(format!("failed to convert {value} to a u16"))) + } + + #[allow(clippy::needless_pass_by_value)] + pub fn envoy_u32_to_u16(value: UInt32Value) -> Result { + u32_to_u16(value.value) + } + + pub fn http_status_from + Display + Copy>(status: T) -> Result { + StatusCode::from_u16( + status.try_into().map_err(|_| GenericError::from_msg(format!("invalid status code {status}")))?, + ) + .map_err(|e| GenericError::from_msg_with_cause(format!("invalid status code {status}"), e)) + } + + pub fn duration_from_envoy(duration: PbDuration) -> Result { + match (u64::try_from(duration.seconds), u32::try_from(duration.nanos)) { + (Ok(seconds), Ok(nanos)) => Ok(Duration::new(seconds, nanos)), + (_, _) => Err(GenericError::from_msg(format!("Failed to conver {duration:?} into a Duration"))), + } + } + #[allow(clippy::needless_pass_by_value)] + pub fn http_status_from_envoy(status: EnvoyHttpStatus) -> Result { + let EnvoyHttpStatus { code } = status; + super::http_status_from(code).with_node("code") + } +} diff --git a/orion-configuration/src/lib.rs b/orion-configuration/src/lib.rs new file mode 100644 index 00000000..5cc22cea --- /dev/null +++ b/orion-configuration/src/lib.rs @@ -0,0 +1,107 @@ +#![allow(clippy::wildcard_imports)] +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+}
diff --git a/orion-configuration/src/lib.rs b/orion-configuration/src/lib.rs
new file mode 100644
index 00000000..5cc22cea
--- /dev/null
+++ b/orion-configuration/src/lib.rs
@@ -0,0 +1,107 @@
+#![allow(clippy::wildcard_imports)]
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use orion_error::Error;
+pub(crate) type BoxedError = Box<dyn Error + Send + Sync>;
+
+pub mod config;
+pub mod options;
+
+pub type Result<T> = std::result::Result<T, BoxedError>;
+
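+/// Consumes an iterator and returns its only element, or an error when it
+/// yields zero or more than one item. A minimal sketch of its use:
+///
+/// ```
+/// use orion_configuration::VerifySingleIter;
+/// assert_eq!([1].into_iter().verify_single().unwrap(), 1);
+/// assert!([1, 2].into_iter().verify_single().is_err());
+/// ```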
+pub trait VerifySingleIter: Iterator {
+    fn verify_single(self) -> ::std::result::Result<Self::Item, BoxedError>;
+}
+
+impl<I> VerifySingleIter for I
+where
+    I: Iterator,
+{
+    fn verify_single(mut self) -> ::std::result::Result<Self::Item, BoxedError> {
+        match (self.next(), self.next()) {
+            (Some(first), None) => Ok(first),
+            (None, _) => Err("Iterator is empty".into()),
+            (Some(_), Some(_)) => Err("Iterator contains more than one element".into()),
+        }
+    }
+}
+
+#[cfg(feature = "envoy-conversions")]
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn protobuf_decode_any_conn_manager_go_control() {
+        use orion_data_plane_api::envoy_data_plane_api::{
+            envoy::{
+                config::core::v3::{
+                    api_config_source::ApiType::Grpc,
+                    config_source::ConfigSourceSpecifier,
+                    grpc_service::{EnvoyGrpc, TargetSpecifier},
+                    ApiConfigSource, ApiVersion, ConfigSource, GrpcService,
+                },
+                extensions::filters::network::http_connection_manager::v3::{
+                    http_connection_manager::RouteSpecifier, http_filter::ConfigType,
+                    HttpConnectionManager as EnvoyHttpConnectionManager, HttpFilter, Rds,
+                },
+            },
+            google::protobuf::Any,
+            prost::Message,
+        };
+
+        // This is generated by go-control-plane for the default listener resource
+        const PAYLOAD: &[u8] = b"\x12\x04http*S\n\x0bhttp-router\"D\nBtype.googleapis.com/envoy.extensions.filters.http.router.v3.Router\x1a*\n\x1b0\x02\x12\x17\x08\x02\"\x0f\n\r\n\x0bxds_cluster8\x01@\x02\x12\x0blocal_route";
+        let expected_conn_manager = EnvoyHttpConnectionManager {
+            stat_prefix: "http".to_owned(),
+            http_filters: vec![HttpFilter {
+                name: "http-router".to_owned(),
+                config_type: Some(ConfigType::TypedConfig(Any {
+                    type_url: "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router".to_owned(),
+                    value: vec![].into(),
+                })),
+                ..Default::default()
+            }],
+            route_specifier: Some(RouteSpecifier::Rds(Rds {
+                config_source: Some(ConfigSource {
+                    resource_api_version: ApiVersion::V3.into(),
+                    config_source_specifier: Some(ConfigSourceSpecifier::ApiConfigSource(ApiConfigSource {
+                        api_type: Grpc.into(),
+                        transport_api_version: ApiVersion::V3.into(),
+                        grpc_services: vec![GrpcService {
+                            target_specifier: Some(TargetSpecifier::EnvoyGrpc(EnvoyGrpc {
+                                cluster_name: "xds_cluster".into(),
+                                ..Default::default()
+                            })),
+                            ..Default::default()
+                        }],
+                        set_node_on_first_message_only: true,
+                        ..Default::default()
+                    })),
+                    ..Default::default()
+                }),
+                route_config_name: "local_route".to_owned(),
+            })),
+            ..Default::default()
+        };
+        // Decoding alone would already catch a malformed payload, even without the equality check
+        let m = EnvoyHttpConnectionManager::decode(PAYLOAD).unwrap();
+        assert_eq!(m, expected_conn_manager);
+    }
+}
diff --git a/orion-configuration/src/main.rs b/orion-configuration/src/main.rs
new file mode 100644
index 00000000..77145969
--- /dev/null
+++ b/orion-configuration/src/main.rs
@@ -0,0 +1,31 @@
+#![allow(clippy::print_stdout)]
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use orion_configuration::{config::Config, options::Options, Result};
+use orion_error::ResultExtension;
+
+fn main() -> Result<()> {
+    let config = Config::new(&Options::from_path("bootstrap.yaml"))?;
+    let yaml = serde_yaml::to_string(&config).context("failed to serialize orion config")?;
+    std::fs::write("orion.yaml", yaml.as_bytes())?;
+    println!("{yaml}");
+    Ok(())
+}
diff --git a/orion-configuration/src/options.rs b/orion-configuration/src/options.rs
new file mode 100644
index 00000000..b1a9485d
--- /dev/null
+++ b/orion-configuration/src/options.rs
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::{
+    num::{NonZeroU32, NonZeroUsize},
+    path::PathBuf,
+};
+
+use clap::Parser;
+
+#[derive(Debug, Clone, clap::Args)]
+#[group(required = true, multiple = true)]
+pub struct ConfigFiles {
+    #[arg(help = "Configuration file", short = 'c', long = "config")]
+    pub config: Option<PathBuf>,
+    #[cfg(feature = "envoy-conversions")]
+    #[arg(help = "Override bootstrap with Envoy bootstrap from a file", long = "with-envoy-bootstrap")]
+    pub bootstrap_override: Option<PathBuf>,
+}
+
+#[derive(Parser, Debug, Clone)]
+pub struct Options {
+    #[clap(flatten)]
+    pub config_files: ConfigFiles,
+    #[arg(help = "Number of CPUs to use", short = 'C', long = "num-cpus")]
+    pub num_cpus: Option<NonZeroUsize>,
+    #[arg(help = "Number of Tokio runtimes to use", short = 'R', long = "num-runtimes")]
+    pub num_runtimes: Option<NonZeroUsize>,
+    #[arg(help = "Tokio global queue interval (ticks)", long = "global-queue-interval")]
+    pub global_queue_interval: Option<NonZeroU32>,
+    #[arg(help = "Tokio event interval (ticks)", long = "event-interval")]
+    pub event_interval: Option<NonZeroU32>,
+    #[arg(help = "Tokio max IO events per tick", long = "max-io-events-per-tick")]
+    pub max_io_events_per_tick: Option<NonZeroUsize>,
+    #[arg(
+        help = "Specify the queue length (channel) toward the clusters manager",
+        long = "clusters-manager-queue-length"
+    )]
+    pub clusters_manager_queue_length: Option<NonZeroUsize>,
+    #[arg(
+        help = "Comma delimited non-empty list of core ids to use for worker nodes",
+        long = "core-ids",
+        num_args = 1..,
+        value_delimiter = ',',
+    )]
+    pub core_ids: Option<Vec<usize>>,
+}
+
+impl Options {
+    pub fn parse_options() -> Self {
+        Options::parse()
+    }
+
+    pub fn from_path(path: impl Into<PathBuf>) -> Self {
+        Self {
+            config_files: ConfigFiles { config: Some(path.into()), bootstrap_override: None },
+            num_cpus: None,
+            num_runtimes: None,
+            global_queue_interval: None,
+            event_interval: None,
+            max_io_events_per_tick: None,
+            clusters_manager_queue_length: None,
+            core_ids: None,
+        }
+    }
+
+    pub fn from_path_to_envoy(path: impl Into<PathBuf>) -> Self {
+        Self {
+            config_files: ConfigFiles { config: None, bootstrap_override: Some(path.into()) },
+            num_cpus: None,
+            num_runtimes: None,
+            global_queue_interval: None,
+            event_interval: None,
+            max_io_events_per_tick: None,
+            clusters_manager_queue_length: None,
+            core_ids: None,
+        }
+    }
+}
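+
+// Example invocations (the binary name `orion` is assumed for illustration):
+//
+//     orion --config bootstrap.yaml --num-cpus 4 --core-ids 0,1,2,3
+//     orion --with-envoy-bootstrap envoy_bootstrap.yaml
+//
+// `Options::from_path` and `Options::from_path_to_envoy` mirror these two forms
+// programmatically, leaving all runtime tuning knobs at their defaults.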
diff --git a/orion-configuration/tests/config.yaml b/orion-configuration/tests/config.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/orion-configuration/tests/config_bad.yaml b/orion-configuration/tests/config_bad.yaml
new file mode 100644
index 00000000..86ca9d20
--- /dev/null
+++ b/orion-configuration/tests/config_bad.yaml
@@ -0,0 +1,2 @@
+
+runtime: 44
diff --git a/orion-configuration/tests/test_configuration.rs b/orion-configuration/tests/test_configuration.rs
new file mode 100644
index 00000000..85f16463
--- /dev/null
+++ b/orion-configuration/tests/test_configuration.rs
@@ -0,0 +1,13 @@
+use orion_configuration::config::{deserialize_yaml, Config};
+use std::path::PathBuf;
+
+#[test]
+fn empty_config() {
+    let _cfg: Config = deserialize_yaml(&PathBuf::from("tests/config.yaml")).unwrap();
+}
+
+#[test]
+fn bad_config() {
+    let r: Result<Config, _> = deserialize_yaml(&PathBuf::from("tests/config_bad.yaml"));
+    assert!(r.is_err());
+}
diff --git a/orion-data-plane-api/Cargo.toml b/orion-data-plane-api/Cargo.toml
new file mode 100644
index 00000000..92a86ece
--- /dev/null
+++ b/orion-data-plane-api/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+authors = ["x30050848 "]
+description = "Orion proxy adapter library for dynamic control planes using the Envoy xDS API"
+edition = "2021"
+license = "MIT"
+name = "orion-data-plane-api"
+version = "0.1.9"
+
+[dependencies]
+anyhow = "1"
+async-stream = "^0.3.6"
+envoy-data-plane-api.workspace = true
+futures.workspace = true
+serde.workspace = true
+serde_yaml.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
+tokio-stream.workspace = true
+tokio-util = "0.7.13"
+tower.workspace = true
+tracing.workspace = true
+
+[dev-dependencies]
+hyper-util.workspace = true
diff --git a/orion-data-plane-api/src/bootstrap_loader/bootstrap.rs b/orion-data-plane-api/src/bootstrap_loader/bootstrap.rs
new file mode 100644
index 00000000..899f1c46
--- /dev/null
+++ b/orion-data-plane-api/src/bootstrap_loader/bootstrap.rs
@@ -0,0 +1,318 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use crate::decode::decode_any_type;
+use std::collections::HashSet;
+use std::fs;
+use std::path;
+
+use crate::envoy_validation::FilterValidation;
+use envoy_data_plane_api::envoy::config::bootstrap::v3::bootstrap::DynamicResources;
+use envoy_data_plane_api::envoy::config::bootstrap::v3::Bootstrap;
+use envoy_data_plane_api::envoy::config::cluster::v3::Cluster;
+use envoy_data_plane_api::envoy::config::core::v3::address;
+use envoy_data_plane_api::envoy::config::core::v3::config_source::ConfigSourceSpecifier;
+use envoy_data_plane_api::envoy::config::core::v3::grpc_service::TargetSpecifier;
+use envoy_data_plane_api::envoy::config::core::v3::{ApiConfigSource, SocketAddress};
+use envoy_data_plane_api::envoy::config::endpoint::v3::lb_endpoint::HostIdentifier;
+use envoy_data_plane_api::envoy::config::endpoint::v3::Endpoint;
+use envoy_data_plane_api::envoy::config::listener::v3::filter::ConfigType;
+use envoy_data_plane_api::envoy::config::listener::v3::Listener;
+use envoy_data_plane_api::envoy::config::route::v3::RouteConfiguration;
+use envoy_data_plane_api::envoy::extensions::filters::network::http_connection_manager::v3::http_connection_manager::RouteSpecifier;
+use envoy_data_plane_api::envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager;
+
+use crate::xds::model::TypeUrl;
+
+const EXT_HTTP_CONN_MANAGER: &str =
+    "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager";
+
+#[derive(Clone)]
+pub struct BootstrapLoader {
+    bootstrap: Option<Bootstrap>,
+}
+
+impl BootstrapLoader {
+    pub fn load<P: AsRef<path::Path>>(config_path: P) -> BootstrapLoader {
+        let mut loader = BootstrapLoader { bootstrap: None };
+        let config_contents = match fs::read_to_string(config_path) {
+            Ok(content) => content,
+            Err(_) => return loader,
+        };
+
+        match crate::decode::from_yaml::<Bootstrap>(&config_contents) {
+            Ok(bootstrap) => {
+                loader.bootstrap = Some(bootstrap);
+            },
+            Err(_) => return loader,
+        };
+
+        loader
+    }
+}
+
+impl From<Bootstrap> for BootstrapLoader {
+    fn from(value: Bootstrap) -> Self {
+        BootstrapLoader { bootstrap: Some(value) }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct XdsConfig(pub XdsType, pub SocketAddress);
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum XdsType {
+    Aggregated(HashSet<TypeUrl>),
+    Individual(TypeUrl),
+}
+
+impl From<TypeUrl> for XdsType {
+    fn from(url: TypeUrl) -> Self {
+        XdsType::Individual(url)
+    }
+}
+
+#[derive(Debug)]
+pub enum BootstrapResolveErr {
+    InvalidEndpoint,
+    InvalidListener,
+    InvalidHttpManager,
+    InvalidYaml,
+    InvalidSocketAddress,
+    MissingRdsConfigSource,
+}
+
+pub trait BootstrapResolver {
+    fn get_static_listener_configs(&self) -> Result<Vec<Listener>, BootstrapResolveErr>;
+    fn get_static_route_configs(&self) -> Result<Vec<RouteConfiguration>, BootstrapResolveErr>;
+    fn get_static_cluster_configs(&self) -> Result<Vec<Cluster>, BootstrapResolveErr>;
+    #[deprecated]
+    fn get_xds_configs(&self) -> Result<Vec<XdsConfig>, BootstrapResolveErr>;
+    fn get_ads_configs(&self) -> Result<Vec<String>, BootstrapResolveErr>;
+}
+
+impl BootstrapLoader {
+    fn get_endpoint_by_name(&self, name: String) -> Result<Endpoint, BootstrapResolveErr> {
+        let host_identifier = self
+            .bootstrap
+            .as_ref()
+            .and_then(|b| b.static_resources.as_ref())
+            .and_then(|static_resources| static_resources.clusters.iter().find(|&cluster| cluster.name == name))
+            .and_then(|cluster| cluster.load_assignment.as_ref())
+            // TODO: support multiple endpoints with weights through load balancing
+            .and_then(|load_assignment| load_assignment.endpoints.first())
+            .and_then(|endpoints| endpoints.lb_endpoints.first())
+            .and_then(|endpoint| endpoint.host_identifier.as_ref());
+
+        let Some(HostIdentifier::Endpoint(endpoint_any)) = host_identifier else {
+            return Err(BootstrapResolveErr::InvalidEndpoint);
+        };
+        Ok(endpoint_any.clone())
+    }
+
+    fn resolve_api_config(&self, api_config_source: &ApiConfigSource) -> Option<SocketAddress> {
+        let target_specifier =
+            api_config_source.grpc_services.first().and_then(|grpc_service| grpc_service.target_specifier.as_ref());
+
+        if let Some(TargetSpecifier::EnvoyGrpc(grpc_conf)) = target_specifier {
+            let address = self
+                .get_endpoint_by_name(grpc_conf.cluster_name.clone())
+                .ok()
+                .and_then(|endpoint| endpoint.address)
+                .and_then(|address| address.address);
+
+            if let Some(address::Address::SocketAddress(socket)) = address {
+                Some(socket)
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+    }
+
+    fn resolve_config_source(&self, config_source: Option<&ConfigSourceSpecifier>) -> Option<SocketAddress> {
+        if let Some(ConfigSourceSpecifier::ApiConfigSource(api_config_source)) = config_source {
+            self.resolve_api_config(api_config_source)
+        } else {
+            None
+        }
+    }
+
+    fn get_dynamic_resource(&self) -> Option<&DynamicResources> {
+        self.bootstrap.as_ref().and_then(|bootstrap| bootstrap.dynamic_resources.as_ref())
+    }
+
+    fn get_lds_source(&self) -> Option<&ConfigSourceSpecifier> {
+        self.get_dynamic_resource()
+            .and_then(|dynamic_resources| dynamic_resources.lds_config.as_ref())
+            .and_then(|config_source| config_source.config_source_specifier.as_ref())
+    }
+
+    fn get_cds_source(&self) -> Option<&ConfigSourceSpecifier> {
+        self.bootstrap
+            .as_ref()
+            .and_then(|bootstrap| bootstrap.dynamic_resources.as_ref())
+            .and_then(|dynamic_resources| dynamic_resources.cds_config.as_ref())
+            .and_then(|config_source| config_source.config_source_specifier.as_ref())
+    }
+
+    fn get_rds_sources(&self) -> Result<Vec<ConfigSourceSpecifier>, BootstrapResolveErr> {
+        self.bootstrap
+            .as_ref()
+            .and_then(|bootstrap| bootstrap.static_resources.as_ref())
+            .map(|static_resource| static_resource.listeners.as_slice())
+            .unwrap_or(&[])
+            .iter()
+            .flat_map(|listener| {
+                listener.filter_chains.iter().flat_map(|filter_chain| {
+                    filter_chain.filters.iter().filter_map(
+                        |filter| -> Option<Result<ConfigSourceSpecifier, BootstrapResolveErr>> {
+                            filter
+                                .get_http_connection_manager()
+                                .map_err(|_e| BootstrapResolveErr::InvalidYaml)
+                                .ok()?
+                                .and_then(
+                                    |http_connection_manager: HttpConnectionManager| match http_connection_manager
+                                        .route_specifier
+                                    {
+                                        Some(RouteSpecifier::Rds(rds)) => rds
+                                            .config_source
+                                            .and_then(|config_source| config_source.config_source_specifier.clone())
+                                            .ok_or(BootstrapResolveErr::MissingRdsConfigSource)
+                                            .into(),
+                                        _ => None,
+                                    },
+                                )
+                                .ok_or(())
+                                .ok()
+                        },
+                    )
+                })
+            })
+            .collect::<Result<Vec<_>, BootstrapResolveErr>>()
+    }
+
+    fn parse_ads(&self, ads_config: &ApiConfigSource) -> Result<XdsConfig, BootstrapResolveErr> {
+        let ads_host = self.resolve_api_config(ads_config).ok_or(BootstrapResolveErr::InvalidSocketAddress)?;
+        let mut ads_types = HashSet::new();
+
+        if let Some(ConfigSourceSpecifier::Ads(_)) = self.get_lds_source() {
+            ads_types.insert(TypeUrl::Listener);
+        }
+        if let Some(ConfigSourceSpecifier::Ads(_)) = self.get_cds_source() {
+            ads_types.insert(TypeUrl::Cluster);
+        }
+
+        for conf_source in self.get_rds_sources()? {
+            if let ConfigSourceSpecifier::Ads(_) = conf_source {
+                ads_types.insert(TypeUrl::RouteConfiguration);
+                break;
+            }
+        }
+
+        Ok(XdsConfig(XdsType::Aggregated(ads_types), ads_host))
+    }
+}
+
+impl BootstrapResolver for BootstrapLoader {
+    fn get_static_listener_configs(&self) -> Result<Vec<Listener>, BootstrapResolveErr> {
+        self.bootstrap
+            .as_ref()
+            .and_then(|bootstrap| bootstrap.static_resources.as_ref())
+            .map(|static_resources| static_resources.listeners.clone())
+            .ok_or(BootstrapResolveErr::InvalidListener)
+    }
+
+    fn get_static_route_configs(&self) -> Result<Vec<RouteConfiguration>, BootstrapResolveErr> {
+        let mut res = Vec::new();
+        for listener in self
+            .bootstrap
+            .as_ref()
+            .and_then(|bootstrap| bootstrap.static_resources.as_ref())
+            .map(|static_resource| static_resource.listeners.as_slice())
+            .unwrap_or(&[])
+        {
+            for filter_chain in &listener.filter_chains {
+                for filter in &filter_chain.filters {
+                    let Some(config_type) = filter.config_type.as_ref() else { continue };
+                    if let ConfigType::TypedConfig(filter_any) = config_type {
+                        if filter_any.type_url != EXT_HTTP_CONN_MANAGER {
+                            continue;
+                        }
+                        let http_manager: HttpConnectionManager = decode_any_type(filter_any, EXT_HTTP_CONN_MANAGER)
+                            .map_err(|_e| BootstrapResolveErr::InvalidHttpManager)?;
+                        if let Some(RouteSpecifier::RouteConfig(route_config)) = http_manager.route_specifier {
+                            res.push(route_config);
+                        }
+                    }
+                }
+            }
+        }
+        Ok(res)
+    }
+
+    fn get_static_cluster_configs(&self) -> Result<Vec<Cluster>, BootstrapResolveErr> {
+        Ok(self
+            .bootstrap
+            .as_ref()
+            .and_then(|bootstrap| bootstrap.static_resources.as_ref())
+            .map(|static_resources| static_resources.clusters.as_slice())
+            .unwrap_or(&[])
+            .to_vec())
+    }
+
+    fn get_xds_configs(&self) -> Result<Vec<XdsConfig>, BootstrapResolveErr> {
+        let mut res = Vec::new();
+        if let Some(ads_config) =
+            self.get_dynamic_resource().and_then(|dynamic_resources| dynamic_resources.ads_config.as_ref())
+        {
+            res.push(self.parse_ads(ads_config)?);
+        }
+        if let Some(socket) = self.resolve_config_source(self.get_lds_source()) {
+            res.push(XdsConfig(XdsType::from(TypeUrl::Listener), socket));
+        }
+        if let Some(socket) = self.resolve_config_source(self.get_cds_source()) {
+            res.push(XdsConfig(XdsType::from(TypeUrl::Cluster), socket));
+        }
+        for conf_source in self.get_rds_sources()? {
+            if let Some(socket) = self.resolve_config_source(Some(&conf_source)) {
+                res.push(XdsConfig(XdsType::from(TypeUrl::RouteConfiguration), socket));
+            }
+        }
+        Ok(res)
+    }
+
+    fn get_ads_configs(&self) -> Result<Vec<String>, BootstrapResolveErr> {
+        let mut res = Vec::new();
+        if let Some(ads_config) =
+            self.get_dynamic_resource().and_then(|dynamic_resources| dynamic_resources.ads_config.as_ref())
+        {
+            ads_config.grpc_services.iter().for_each(|service| {
+                if let Some(TargetSpecifier::EnvoyGrpc(envoy_grpc)) = service.clone().target_specifier {
+                    res.push(envoy_grpc.cluster_name);
+                }
+            })
+        }
+        Ok(res)
+    }
+}
diff --git a/orion-data-plane-api/src/bootstrap_loader/mod.rs b/orion-data-plane-api/src/bootstrap_loader/mod.rs
new file mode 100644
index 00000000..94679479
--- /dev/null
+++ b/orion-data-plane-api/src/bootstrap_loader/mod.rs
@@ -0,0 +1,21 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub mod bootstrap;
diff --git a/orion-data-plane-api/src/decode.rs b/orion-data-plane-api/src/decode.rs
new file mode 100644
index 00000000..3130de73
--- /dev/null
+++ b/orion-data-plane-api/src/decode.rs
@@ -0,0 +1,198 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use envoy_data_plane_api::{
+    google::protobuf::Any,
+    prost::DecodeError,
+    prost::{Message, Name},
+    prost_reflect::{DescriptorPool, DynamicMessage},
+};
+use serde::de::Error;
+
+#[derive(Debug, thiserror::Error)]
+pub enum DecodeAnyError {
+    #[error("Failed to decode protobuf extension type({0})")]
+    ProtobufError(&'static str, #[source] DecodeError),
+    #[error("Failed to decode yaml extension type({0})")]
+    YamlError(#[from] serde_yaml::Error),
+}
+
+/// Decode a yaml string as a protobuf message.
+///
+/// Note: this is potentially expensive in that it will parse the protobuf
+/// descriptor before actually parsing the yaml. A faster alternative would
+/// be to cache this step in a lazy cell (see the sketch following
+/// `decode_any_type` below).
+pub fn from_serde_deserializer<'de, T, D>(deserializer: D) -> std::result::Result<T, D::Error>
+where
+    D: serde::Deserializer<'de>,
+    T: Message + Name + Default,
+{
+    let pool = DescriptorPool::decode(envoy_data_plane_api::FILE_DESCRIPTOR_SET_BYTES).map_err(D::Error::custom)?;
+    let name = T::full_name();
+    let message_descriptor = pool.get_message_by_name(&name).ok_or(D::Error::custom(name))?;
+    let dynmsg = DynamicMessage::deserialize(message_descriptor, deserializer)?;
+    dynmsg.transcode_to().map_err(D::Error::custom)
+}
+
+pub fn from_yaml<T>(inp: &str) -> Result<T, DecodeAnyError>
+where
+    T: Message + Name + Default,
+{
+    let deserializer = serde_yaml::Deserializer::from_str(inp);
+    from_serde_deserializer(deserializer).map_err(Into::into)
+}
+
+/// Decode the payload from an `Any` type into a generic return type.
+///
+/// - `err_desc` is used in the returned error message.
+///
+/// `.type_url` is not checked; it is up to the caller to verify that it
+/// matches the output type.
+pub fn decode_any_type<R>(any: &Any, err_desc: &'static str) -> Result<R, DecodeAnyError>
+where
+    R: Message + Default,
+{
+    R::decode(any.value.as_slice()).map_err(|e| DecodeAnyError::ProtobufError(err_desc, e))
+}
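+
+/// Lazily-built descriptor pool along the lines suggested in the note on
+/// `from_serde_deserializer`. A sketch only: nothing here uses it yet, and it
+/// assumes a toolchain with `std::sync::LazyLock` (Rust 1.80+).
+#[allow(dead_code, clippy::expect_used)]
+static CACHED_DESCRIPTOR_POOL: std::sync::LazyLock<DescriptorPool> =
+    std::sync::LazyLock::new(|| {
+        DescriptorPool::decode(envoy_data_plane_api::FILE_DESCRIPTOR_SET_BYTES)
+            .expect("embedded file descriptor set must decode")
+    });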
+
+#[cfg(test)]
+mod tests {
+    use super::{decode_any_type, from_yaml, Any};
+    use envoy_data_plane_api::envoy::config::listener::v3::filter::ConfigType;
+    use envoy_data_plane_api::envoy::config::listener::v3::Listener;
+    use envoy_data_plane_api::envoy::config::listener::v3::{Filter, FilterChain};
+    use envoy_data_plane_api::envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager;
+    use envoy_data_plane_api::prost::Message;
+
+    fn expected_conn_manager() -> HttpConnectionManager {
+        HttpConnectionManager { server_name: "name".to_string(), ..Default::default() }
+    }
+
+    /// In a previous version a different impl of the Any type was used that
+    /// would be deserialized directly from yaml. This introduced a bug where
+    /// the decoding would differ depending on the source. This is no longer
+    /// the case, so the asserts in this test were inverted (but the comments remain).
+    #[test]
+    fn protobuf_decode_any_conn_manager() {
+        // protobuf - Message TestInner { string name = 10; }
+        // same as the envoy HttpConnectionManager server_name
+        // (api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto)
+        const PAYLOAD: &[u8] = b"R\x04name";
+        let v = Any { type_url: "url".into(), value: PAYLOAD.to_vec().into() };
+        let m: HttpConnectionManager = decode_any_type(&v, "---").unwrap();
+        assert_eq!(m, expected_conn_manager());
+
+        // Counter proof
+        let y: Result<HttpConnectionManager, _> = decode_any_type(&v, "---");
+        //assert!(y.is_err(), "Yaml decoder cannot handle protobuf data");
+        assert!(y.is_ok());
+    }
+
+    const YAML_PAYLOAD_LISTEN_FILTER: &str = r#"
+name: name
+filterChains:
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typedConfig:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        server_name: name
+    "#;
+
+    fn prost_payload_listen_filter() -> Vec<u8> {
+        let http_man = HttpConnectionManager { server_name: "name".to_string(), ..Default::default() };
+
+        let any = Any {
+            type_url:
+                "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
+                    .to_string(),
+            value: http_man.encode_to_vec().into(),
+        };
+
+        let filter = Filter {
+            name: "envoy.filters.network.http_connection_manager".to_string(),
+            config_type: Some(ConfigType::TypedConfig(any)),
+        };
+
+        let fc = FilterChain { filters: vec![filter], ..Default::default() };
+
+        Listener { name: "name".to_string(), filter_chains: vec![fc], ..Default::default() }.encode_to_vec()
+    }
+
+    /// In a previous version a different impl of the Any type was used that
+    /// would be deserialized directly from yaml. This introduced a bug where
+    /// the decoding would differ depending on the source. This is no longer
+    /// the case, so the asserts in this test were inverted (but the comments remain).
+    #[test]
+    fn yaml_and_prost_eq() {
+        let l_yaml: Listener = from_yaml(YAML_PAYLOAD_LISTEN_FILTER).unwrap();
+        let l_pb: Listener = Listener::decode(prost_payload_listen_filter().as_slice()).unwrap();
+
+        //assert_ne!(l_yaml, l_pb, "yaml decoded obj is different from prost obj");
+        assert_eq!(l_yaml, l_pb);
+
+        {
+            let mut l_yaml2 = l_yaml.clone();
+            l_yaml2.filter_chains[0].filters[0].config_type = l_pb.filter_chains[0].filters[0].config_type.clone();
+            assert_eq!(l_yaml2, l_pb, "Difference is the any type");
+        }
+
+        let ConfigType::TypedConfig(any_yaml) = l_yaml.filter_chains[0].filters[0].config_type.as_ref().unwrap() else {
+            panic!("Expecting TypedConfig in yaml");
+        };
+        let ConfigType::TypedConfig(any_pb) = l_pb.filter_chains[0].filters[0].config_type.as_ref().unwrap() else {
+            panic!("Expecting TypedConfig in pb");
+        };
+
+        assert_eq!(any_yaml.type_url, any_pb.type_url, "Any type urls are the same");
+
+        let man_yaml: HttpConnectionManager = decode_any_type(any_yaml, "").unwrap();
+        let man_pb: HttpConnectionManager = decode_any_type(any_pb, "").unwrap();
+        assert_eq!(man_yaml, man_pb, "Both http managers are identical after decoding QED");
+
+        let wrong_pb: HttpConnectionManager = decode_any_type(any_yaml, "").unwrap();
+        //assert_ne!(
+        //    wrong_pb, man_yaml,
+        //    "Decoding yaml any via prost works, but is wrong"
+        //);
+        assert_eq!(wrong_pb, man_yaml);
+
+        let wrong_yaml: Result<HttpConnectionManager, _> = decode_any_type(any_pb, "");
+        //assert!(
+        //    wrong_yaml.is_err(),
+        //    "Decoding prost any via serde_yaml fails"
+        //);
+        assert!(wrong_yaml.is_ok());
+    }
+
+    #[test]
+    fn decode_errors() {
+        const BAD_EXT: &str = r#"
+name: name
+filterChains:
+  - filters:
+    - name: envoy.filters.network.http_connection_manager
+      typedConfig:
+        "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+        server_name2: name
+    "#;
+
+        from_yaml::<Listener>(BAD_EXT).unwrap_err();
+    }
+}
diff --git a/orion-data-plane-api/src/envoy_validation/mod.rs b/orion-data-plane-api/src/envoy_validation/mod.rs
new file mode 100644
index 00000000..d6040d80
--- /dev/null
+++ b/orion-data-plane-api/src/envoy_validation/mod.rs
@@ -0,0 +1,116 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use crate::decode::decode_any_type;
+use envoy_data_plane_api::envoy::config::cluster::v3::Cluster;
+use envoy_data_plane_api::envoy::config::core::v3::transport_socket;
+use envoy_data_plane_api::envoy::config::listener::v3::{Filter, FilterChain};
+use envoy_data_plane_api::envoy::config::route::v3::Route;
+use envoy_data_plane_api::envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit;
+use envoy_data_plane_api::envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager;
+use envoy_data_plane_api::envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext;
+use envoy_data_plane_api::envoy::extensions::upstreams::http::v3::HttpProtocolOptions;
+
+type Result<T> = std::result::Result<T, crate::decode::DecodeAnyError>;
+
+pub trait FilterChainValidation {
+    /// Decode TLS context settings from the filter chain's transport socket.
+    /// May fail due to decoding errors.
+    fn get_downstream_tls_context(&self) -> Result<Option<DownstreamTlsContext>>;
+}
+
+const T_EXT_TLS_DOWNSTREAM_CONTEXT: &str =
+    "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext";
+
+impl FilterChainValidation for FilterChain {
+    fn get_downstream_tls_context(&self) -> Result<Option<DownstreamTlsContext>> {
+        self.transport_socket
+            .as_ref()
+            .and_then(|s| s.config_type.as_ref())
+            .map(|cfg| {
+                let transport_socket::ConfigType::TypedConfig(ref any) = cfg;
+                any
+            })
+            .filter(|any| any.type_url == T_EXT_TLS_DOWNSTREAM_CONTEXT)
+            .map(|any| decode_any_type(any, T_EXT_TLS_DOWNSTREAM_CONTEXT))
+            .transpose()
+    }
+}
+
+const T_EXT_HTTP_CONN_MANAGER: &str =
+    "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager";
+
+pub trait FilterValidation {
+    /// Get the HTTP connection manager extension associated with this filter, if any.
+    /// Fails on decoding errors.
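+    /// A sketch of the intended call pattern (`filter` being any listener
+    /// filter; names assumed for illustration):
+    ///
+    /// ```ignore
+    /// if let Some(hcm) = filter.get_http_connection_manager()? {
+    ///     println!("stat_prefix: {}", hcm.stat_prefix);
+    /// }
+    /// ```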
+    fn get_http_connection_manager(&self) -> Result<Option<HttpConnectionManager>>;
+}
+
+impl FilterValidation for Filter {
+    fn get_http_connection_manager(&self) -> Result<Option<HttpConnectionManager>> {
+        use envoy_data_plane_api::envoy::config::listener::v3::filter::ConfigType;
+        self.config_type
+            .as_ref()
+            .and_then(|cfg| match cfg {
+                ConfigType::TypedConfig(ref any) => Some(any),
+                ConfigType::ConfigDiscovery(_) => None,
+            })
+            .filter(|any| any.type_url == T_EXT_HTTP_CONN_MANAGER)
+            .map(|any| decode_any_type(any, T_EXT_HTTP_CONN_MANAGER))
+            .transpose()
+    }
+}
+
+const T_EXT_HTTP_PROTO_OPTIONS: &str = "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions";
+
+pub trait ClusterValidation {
+    fn get_http_protocol_options(&self) -> Result<Option<HttpProtocolOptions>>;
+}
+
+impl ClusterValidation for Cluster {
+    fn get_http_protocol_options(&self) -> Result<Option<HttpProtocolOptions>> {
+        self.typed_extension_protocol_options
+            // Same as T_EXT_HTTP_PROTO_OPTIONS sans prefix
+            .get("envoy.extensions.upstreams.http.v3.HttpProtocolOptions")
+            .as_ref()
+            .filter(|any| any.type_url == T_EXT_HTTP_PROTO_OPTIONS)
+            .map(|any| decode_any_type(any, T_EXT_HTTP_PROTO_OPTIONS))
+            .transpose()
+    }
+}
+
+const T_EXT_HTTP_LOCAL_RATELIMIT: &str =
+    "type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit";
+
+pub trait LocalRateLimitValidation {
+    fn get_local_ratelimit(&self) -> Result<Option<LocalRateLimit>>;
+}
+
+impl LocalRateLimitValidation for Route {
+    fn get_local_ratelimit(&self) -> Result<Option<LocalRateLimit>> {
+        self.typed_per_filter_config
+            // Same as T_EXT_HTTP_LOCAL_RATELIMIT sans prefix
+            .get("envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit")
+            .as_ref()
+            .filter(|any| any.type_url == T_EXT_HTTP_LOCAL_RATELIMIT)
+            .map(|any| decode_any_type(any, T_EXT_HTTP_LOCAL_RATELIMIT))
+            .transpose()
+    }
+}
diff --git a/orion-data-plane-api/src/lib.rs b/orion-data-plane-api/src/lib.rs
new file mode 100644
index 00000000..12ef9f84
--- /dev/null
+++ b/orion-data-plane-api/src/lib.rs
@@ -0,0 +1,27 @@
+#![warn(clippy::unwrap_used)]
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+#![warn(clippy::expect_used)]
+#![warn(clippy::panic)]
+pub mod bootstrap_loader;
+pub mod decode;
+pub mod envoy_validation;
+pub mod xds;
+pub use envoy_data_plane_api;
diff --git a/orion-data-plane-api/src/xds/bindings.rs b/orion-data-plane-api/src/xds/bindings.rs
new file mode 100644
index 00000000..1d9a05de
--- /dev/null
+++ b/orion-data-plane-api/src/xds/bindings.rs
@@ -0,0 +1,157 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::future::Future;
+use std::pin::Pin;
+
+use super::model;
+use envoy_data_plane_api::envoy::service::cluster::v3::cluster_discovery_service_client::ClusterDiscoveryServiceClient;
+use envoy_data_plane_api::envoy::service::discovery::v3::aggregated_discovery_service_client::AggregatedDiscoveryServiceClient;
+use envoy_data_plane_api::envoy::service::discovery::v3::{DeltaDiscoveryRequest, DeltaDiscoveryResponse};
+use envoy_data_plane_api::envoy::service::endpoint::v3::endpoint_discovery_service_client::EndpointDiscoveryServiceClient;
+use envoy_data_plane_api::envoy::service::listener::v3::listener_discovery_service_client::ListenerDiscoveryServiceClient;
+use envoy_data_plane_api::envoy::service::route::v3::route_discovery_service_client::RouteDiscoveryServiceClient;
+use envoy_data_plane_api::envoy::service::secret::v3::secret_discovery_service_client::SecretDiscoveryServiceClient;
+use model::TypeUrl;
+
+use envoy_data_plane_api::tonic;
+use tokio_stream::Stream;
+use tonic::transport::Channel;
+
+pub type DeltaFuture<'a> = Pin<
+    Box<
+        dyn Future<
+                Output = std::result::Result<
+                    tonic::Response<tonic::Streaming<DeltaDiscoveryResponse>>,
+                    tonic::Status,
+                >,
+            > + Send
+            + 'a,
+    >,
+>;
+
+/// Abstracts over the variation in the generated xDS clients
+pub trait TypedXdsBinding {
+    fn type_url() -> Option<TypeUrl>;
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaFuture<'_>;
+}
+
+/// Handle to the ADS client
+pub struct AggregatedDiscoveryType {
+    pub underlying_client: AggregatedDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for AggregatedDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        None
+    }
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaFuture<'_> {
+        Box::pin(self.underlying_client.delta_aggregated_resources(request))
+    }
+}
+
+/// Handle to the CDS client
+pub struct ClusterDiscoveryType {
+    pub underlying_client: ClusterDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for ClusterDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::Cluster)
+    }
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaFuture<'_> {
+        Box::pin(self.underlying_client.delta_clusters(request))
+    }
+}
+
+/// Handle to the LDS client
+pub struct ListenerDiscoveryType {
+    pub underlying_client: ListenerDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for ListenerDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::Listener)
+    }
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaFuture<'_> {
+        Box::pin(self.underlying_client.delta_listeners(request))
+    }
+}
+
+/// Handle to the RDS client
+pub struct RouteDiscoveryType {
+    pub underlying_client: RouteDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for RouteDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::RouteConfiguration)
+    }
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaFuture<'_> {
+        Box::pin(self.underlying_client.delta_routes(request))
+    }
+}
+
+/// Handle to the EDS client
+pub struct EndpointDiscoveryType {
+    pub underlying_client: EndpointDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for EndpointDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::ClusterLoadAssignment)
+    }
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaFuture<'_> {
+        Box::pin(self.underlying_client.delta_endpoints(request))
+    }
+}
+
+/// Handle to the SDS client
+pub struct SecretsDiscoveryType {
+    pub underlying_client: SecretDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for SecretsDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::Secret)
+    }
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaFuture<'_> {
+        Box::pin(self.underlying_client.delta_secrets(request))
+    }
+}
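+
+// Constructing a binding just wraps the generated tonic client. A sketch (the
+// endpoint address is assumed for illustration):
+//
+//     let channel = Channel::from_static("http://127.0.0.1:18000").connect_lazy();
+//     let cds = ClusterDiscoveryType { underlying_client: ClusterDiscoveryServiceClient::new(channel) };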
diff --git a/orion-data-plane-api/src/xds/client.rs b/orion-data-plane-api/src/xds/client.rs
new file mode 100644
index 00000000..b3cd09b7
--- /dev/null
+++ b/orion-data-plane-api/src/xds/client.rs
@@ -0,0 +1,419 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use super::bindings;
+use super::model::{
+    RejectedConfig, ResourceId, ResourceVersion, TypeUrl, XdsError, XdsResourcePayload, XdsResourceUpdate,
+};
+use envoy_data_plane_api::envoy::config::core::v3::Node;
+use envoy_data_plane_api::envoy::service::discovery::v3::{DeltaDiscoveryRequest, DeltaDiscoveryResponse};
+use envoy_data_plane_api::google::rpc::Status;
+use envoy_data_plane_api::tonic;
+use std::collections::{HashMap, HashSet};
+use std::time::Duration;
+
+use tokio::sync::mpsc;
+use tokio::sync::oneshot;
+use tokio::time;
+use tracing::{debug, info, warn};
+
+pub struct DiscoveryClientBuilder<C> {
+    node: Node,
+    client_binding: C,
+    initial_subscriptions: HashMap<TypeUrl, HashSet<ResourceId>>,
+    error: Option<String>,
+}
+
+impl<C> DiscoveryClientBuilder<C>
+where
+    C: bindings::TypedXdsBinding,
+{
+    pub fn new(node: Node, client: C) -> DiscoveryClientBuilder<C> {
+        DiscoveryClientBuilder { node, client_binding: client, initial_subscriptions: HashMap::new(), error: None }
+    }
+
+    pub fn subscribe_resource_name(mut self, resource_id: ResourceId) -> Self {
+        if let Some(type_url) = C::type_url() {
+            self = self.subscribe_resource_name_by_typeurl(resource_id, type_url);
+        } else {
+            self.error = Some("subscribe only works if the typed binding provides a compatible type_url".to_string());
+        }
+        self
+    }
+
+    pub fn subscribe_resource_name_by_typeurl(mut self, resource_id: ResourceId, type_url: TypeUrl) -> Self {
+        let configured_type_url = C::type_url();
+        if configured_type_url.is_none() || configured_type_url.is_some_and(|type_is_set| type_is_set == type_url) {
+            self.initial_subscriptions.entry(type_url).or_default().insert(resource_id);
+        } else {
+            self.error = Some("can only subscribe by type_url when using a compatible typed binding".to_string());
+        }
+        self
+    }
+
+    pub fn build(self) -> Result<(DeltaClientBackgroundWorker<C>, DeltaDiscoveryClient), XdsError> {
+        if let Some(err) = self.error {
+            Err(XdsError::BuilderFailed(err))
+        } else {
+            let (subscription_updates_tx, subscription_updates_rx) = mpsc::channel::<SubscriptionEvent>(100);
+            let (resource_updates_tx, resource_updates_rx) = mpsc::channel::<XdsUpdateEvent>(100);
+            Ok((
+                DeltaClientBackgroundWorker {
+                    node: self.node,
+                    client_binding: self.client_binding,
+                    initial_subscriptions: self.initial_subscriptions,
+                    subscriptions_rx: subscription_updates_rx,
+                    resources_tx: resource_updates_tx,
+                },
+                DeltaDiscoveryClient { subscriptions_tx: subscription_updates_tx, resources_rx: resource_updates_rx },
+            ))
+        }
+    }
+}
+
+/// Incremental client implementing the delta variant of the xDS protocol.
+/// Use it to consume xDS configuration updates asynchronously and to modify
+/// resource subscriptions.
+pub struct DeltaDiscoveryClient {
+    subscriptions_tx: mpsc::Sender<SubscriptionEvent>,
+    resources_rx: mpsc::Receiver<XdsUpdateEvent>,
+}
+
+impl DeltaDiscoveryClient {
+    pub async fn recv(&mut self) -> Option<XdsUpdateEvent> {
+        self.resources_rx.recv().await
+    }
+
+    #[cfg(test)]
+    pub async fn try_recv(&mut self) -> Result<XdsUpdateEvent, mpsc::error::TryRecvError> {
+        self.resources_rx.try_recv()
+    }
+
+    pub async fn subscribe(&self, resource_id: ResourceId, type_url: TypeUrl) -> anyhow::Result<()> {
+        Ok(self.subscriptions_tx.send(SubscriptionEvent::Subscribe(type_url, resource_id)).await?)
+    }
+
+    pub async fn unsubscribe(&self, resource_id: ResourceId, type_url: TypeUrl) -> anyhow::Result<()> {
+        Ok(self.subscriptions_tx.send(SubscriptionEvent::Unsubscribe(type_url, resource_id)).await?)
+    }
+}
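+
+// Typical wiring of the pieces above (a sketch; `node` and `binding` construction
+// and error handling elided):
+//
+//     let (mut worker, mut client) = DiscoveryClientBuilder::new(node, binding)
+//         .subscribe_resource_name("my_listener".to_string())
+//         .build()?;
+//     tokio::spawn(async move { worker.run().await });
+//     while let Some(event) = client.recv().await {
+//         // apply event.updates, then ack (empty vec) or nack (rejected configs)
+//         let _ = event.ack_channel.send(Vec::new());
+//     }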
client closed"); + state.backoff = Duration::from_millis(50) + }, + } + } + + async fn stream_resources(&mut self, state: &mut DiscoveryClientState) -> anyhow::Result<(), XdsError> { + let (discovery_requests_tx, mut discovery_requests_rx) = mpsc::channel::(100); + + let resource_types = match C::type_url() { + Some(type_url) => vec![type_url], + _ => vec![ + TypeUrl::Secret, + TypeUrl::ClusterLoadAssignment, + TypeUrl::Cluster, + TypeUrl::RouteConfiguration, + TypeUrl::Listener, + ], + }; + let initial_requests: Vec = resource_types + .iter() + .map(|resource_type| { + let subscriptions = state.subscriptions.get(resource_type).cloned().unwrap_or_default(); + let already_tracked: HashMap = + state.tracked.get(resource_type).cloned().unwrap_or_default(); + DeltaDiscoveryRequest { + node: Some(self.node.clone()), + type_url: resource_type.to_string(), + initial_resource_versions: already_tracked, + resource_names_subscribe: subscriptions.into_iter().collect(), + ..Default::default() + } + }) + .collect(); + + let outbound_requests = async_stream::stream! { + for request in initial_requests { + yield request; + } + while let Some(message) = discovery_requests_rx.recv().await { + debug!( + type_url = message.type_url, + "sending discovery request" + ); + yield message + } + warn!("outbound discovery request stream has ended!"); + }; + + let mut response_stream = + self.client_binding.delta_request(outbound_requests).await.map_err(XdsError::GrpcStatus)?.into_inner(); + info!("xDS stream established"); + + loop { + tokio::select! { + Some(event) = self.subscriptions_rx.recv() => { + match event { + SubscriptionEvent::Subscribe(type_url, resource_id) => { + debug!( + type_url=type_url.to_string(), + resource_id, + "processing new subscription" + ); + let is_new = state.subscriptions + .entry(type_url) + .or_default() + .insert(resource_id.clone()); + if is_new { + if let Err(err) = discovery_requests_tx.send(DeltaDiscoveryRequest { + node: Some(self.node.clone()), + type_url: type_url.to_string(), + resource_names_subscribe: vec![resource_id], + ..Default::default() + }) + .await { + warn!("problems updating subscription: {:?}", err); + } + } + } + SubscriptionEvent::Unsubscribe(type_url, resource_id) => { + debug!( + type_url=type_url.to_string(), + resource_id, + "processing unsubscribe" + ); + let was_subscribed = state.subscriptions + .entry(type_url) + .or_default() + .remove(resource_id.as_str()); + if was_subscribed { + if let Err(err) = discovery_requests_tx.send(DeltaDiscoveryRequest { + node: Some(self.node.clone()), + type_url: type_url.to_string(), + resource_names_unsubscribe: vec![resource_id], + ..Default::default() + }) + .await { + warn!("problems updating subscription: {:?}", err); + } + } + } + } + } + discovered = response_stream.message() => { + let payload = discovered?; + let discovery_response = payload.ok_or(XdsError::UnknownResourceType("empty payload received".to_string()))?; + self.process_and_acknowledge(discovery_response, &discovery_requests_tx, state).await?; + } + } + } + } + + async fn process_and_acknowledge( + &mut self, + response: DeltaDiscoveryResponse, + acknowledgments_tx: &mpsc::Sender, + state: &mut DiscoveryClientState, + ) -> anyhow::Result<(), XdsError> { + let type_url = TypeUrl::try_from(response.type_url.as_ref())?; + let nonce = response.nonce.clone(); + info!(type_url = type_url.to_string(), size = response.resources.len(), "received config resources from xDS"); + + let for_removal: Vec = response + .removed_resources + .iter() + 
+            .map(|resource_id| {
+                debug!("received delete for config resource {}", resource_id);
+                if let Some(resources) = state.tracked.get_mut(&type_url) {
+                    resources.remove(resource_id);
+                }
+                resource_id.clone()
+            })
+            .collect();
+
+        let mut pending_update_versions = HashMap::<ResourceId, ResourceVersion>::new();
+
+        let updates: Vec<XdsResourceUpdate> = response
+            .resources
+            .into_iter()
+            .filter_map(|resource| {
+                let resource_id = resource.name.to_string();
+                let resource_version = resource.version.to_string();
+                let decoded = XdsResourcePayload::try_from((resource, type_url));
+                if decoded.is_err() {
+                    warn!("problem decoding config update for {}: error {:?}", resource_id, decoded.as_ref().err());
+                } else {
+                    pending_update_versions.insert(resource_id.clone(), resource_version);
+                    debug!("decoded config update for resource {resource_id}");
+                }
+                decoded.ok().map(|value| XdsResourceUpdate::Update(resource_id.clone(), value))
+            })
+            .chain(for_removal.into_iter().map(|resource_id| XdsResourceUpdate::Remove(resource_id, type_url)))
+            .collect();
+
+        let (internal_ack_tx, internal_ack_rx) = oneshot::channel::<Vec<RejectedConfig>>();
+        let notification = XdsUpdateEvent { updates, ack_channel: internal_ack_tx };
+        self.resources_tx
+            .send(notification)
+            .await
+            .map_err(|e: mpsc::error::SendError<XdsUpdateEvent>| XdsError::InternalProcessingError(e.to_string()))?;
+
+        tokio::select! {
+            ack = internal_ack_rx => {
+                match ack {
+                    Ok(rejected_configs) => {
+                        let error = if rejected_configs.is_empty() {
+                            debug!(
+                                type_url = type_url.to_string(),
+                                nonce,
+                                "sending ack response after processing",
+                            );
+                            let tracked_resources = state.tracked.entry(type_url).or_default();
+                            for (resource_id, resource_version) in pending_update_versions.drain() {
+                                tracked_resources.insert(resource_id, resource_version);
+                            }
+                            None
+                        } else {
+                            let error = rejected_configs
+                                .into_iter()
+                                .map(|reject| reject.to_string())
+                                .collect::<Vec<String>>()
+                                .join("; ");
+                            debug!(
+                                type_url = type_url.to_string(),
+                                error,
+                                nonce,
+                                "rejecting configs with nack response",
+                            );
+                            Some(Status {
+                                message: error,
+                                ..Default::default()
+                            })
+                        };
+                        if let Err(err) = acknowledgments_tx.send(DeltaDiscoveryRequest {
+                            type_url: type_url.to_string(),
+                            response_nonce: nonce,
+                            error_detail: error,
+                            ..Default::default()
+                        })
+                        .await
+                        {
+                            warn!("error sending xDS ack/nack upstream: {:?}", err);
+                        }
+                    },
+                    Err(err) => {
+                        warn!("error reading internal ack/nack: {:?}", err);
+                    },
+                }
+            }
+            _ = time::sleep(Duration::from_secs(5)) => {
+                warn!("timed out while waiting to acknowledge config updates");
+                let error = pending_update_versions.into_keys()
+                    .collect::<Vec<String>>()
+                    .join("; ");
+                let error = Status {
+                    message: error,
+                    ..Default::default()
+                };
+                let _ = acknowledgments_tx.send(DeltaDiscoveryRequest {
+                    type_url: type_url.to_string(),
+                    response_nonce: nonce,
+                    error_detail: Some(error),
+                    ..Default::default()
+                })
+                .await;
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/orion-data-plane-api/src/xds/mod.rs b/orion-data-plane-api/src/xds/mod.rs
new file mode 100644
index 00000000..49d0a09c
--- /dev/null
+++ b/orion-data-plane-api/src/xds/mod.rs
@@ -0,0 +1,23 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub mod bindings;
+pub mod client;
+pub mod model;
diff --git a/orion-data-plane-api/src/xds/model.rs b/orion-data-plane-api/src/xds/model.rs
new file mode 100644
index 00000000..41fb21be
--- /dev/null
+++ b/orion-data-plane-api/src/xds/model.rs
@@ -0,0 +1,170 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::fmt;
+use std::fmt::Display;
+use std::fmt::Formatter;
+
+use anyhow::Result;
+use envoy_data_plane_api::envoy::config::cluster::v3::Cluster;
+use envoy_data_plane_api::envoy::config::endpoint::v3::ClusterLoadAssignment;
+use envoy_data_plane_api::envoy::config::listener::v3::Listener;
+use envoy_data_plane_api::envoy::config::route::v3::RouteConfiguration;
+use envoy_data_plane_api::envoy::extensions::transport_sockets::tls::v3::Secret;
+use envoy_data_plane_api::envoy::service::discovery::v3::{DeltaDiscoveryRequest, Resource};
+use envoy_data_plane_api::prost;
+use envoy_data_plane_api::prost::Message;
+use envoy_data_plane_api::tonic;
+use serde::Deserialize;
+use thiserror::Error;
+use tokio::sync::mpsc;
+
+pub type ResourceId = String;
+pub type ResourceVersion = String;
+
+#[derive(Clone, Debug)]
+pub enum XdsResourceUpdate {
+    Update(ResourceId, XdsResourcePayload),
+    Remove(ResourceId, TypeUrl),
+}
+
+impl XdsResourceUpdate {
+    pub fn id(&self) -> ResourceId {
+        match self {
+            XdsResourceUpdate::Update(id, _) => id.to_string(),
+            XdsResourceUpdate::Remove(id, _) => id.to_string(),
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub enum XdsResourcePayload {
+    Listener(ResourceId, Listener),
+    Cluster(ResourceId, Cluster),
+    Endpoints(ResourceId, ClusterLoadAssignment),
+    RouteConfiguration(ResourceId, RouteConfiguration),
+    Secret(ResourceId, Secret),
+}
+
+impl TryFrom<(Resource, TypeUrl)> for XdsResourcePayload {
+    type Error = XdsError;
+
+    fn try_from((resource, type_url): (Resource, TypeUrl)) -> Result<Self, Self::Error> {
+        let resource_id = resource.name;
+        resource.resource.ok_or(XdsError::MissingResource()).and_then(|res| match type_url {
+            TypeUrl::Listener => {
+                let decoded = Listener::decode(res.value.as_slice()).map_err(XdsError::Decode);
+                decoded.map(|value| XdsResourcePayload::Listener(resource_id, value))
+            },
+            TypeUrl::Cluster => {
+                let decoded = Cluster::decode(res.value.as_slice()).map_err(XdsError::Decode);
+                decoded.map(|value| XdsResourcePayload::Cluster(resource_id, value))
+            },
+            TypeUrl::RouteConfiguration => {
+                let decoded = RouteConfiguration::decode(res.value.as_slice()).map_err(XdsError::Decode);
+                decoded.map(|value| XdsResourcePayload::RouteConfiguration(resource_id, value))
+            },
+            TypeUrl::ClusterLoadAssignment => {
+                let decoded = ClusterLoadAssignment::decode(res.value.as_slice()).map_err(XdsError::Decode);
+                decoded.map(|value| XdsResourcePayload::Endpoints(resource_id, value))
+            },
+            TypeUrl::Secret => {
+                let decoded = Secret::decode(res.value.as_slice()).map_err(XdsError::Decode);
+                decoded.map(|value| XdsResourcePayload::Secret(resource_id, value))
+            },
+        })
+    }
+}
+
+#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Deserialize)]
+pub enum TypeUrl {
+    Listener,
+    Cluster,
+    RouteConfiguration,
+    ClusterLoadAssignment,
+    Secret,
+}
+
+impl fmt::Display for TypeUrl {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "{}",
+            match self {
+                TypeUrl::Listener => "type.googleapis.com/envoy.config.listener.v3.Listener".to_string(),
+                TypeUrl::Cluster => "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_string(),
+                TypeUrl::RouteConfiguration =>
+                    "type.googleapis.com/envoy.config.route.v3.RouteConfiguration".to_string(),
+                TypeUrl::ClusterLoadAssignment =>
+                    "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment".to_string(),
+                TypeUrl::Secret => "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret".to_string(),
+            }
+        )
+    }
+}
+
+impl TryFrom<&str> for TypeUrl {
+    type Error = XdsError;
+
+    fn try_from(type_url_string: &str) -> Result<Self, Self::Error> {
+        match type_url_string {
+            "type.googleapis.com/envoy.config.listener.v3.Listener" => Ok(TypeUrl::Listener),
+            "type.googleapis.com/envoy.config.cluster.v3.Cluster" => Ok(TypeUrl::Cluster),
+            "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" => Ok(TypeUrl::RouteConfiguration),
+            "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" => Ok(TypeUrl::ClusterLoadAssignment),
+            "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" => Ok(TypeUrl::Secret),
+            value => Err(XdsError::UnknownResourceType(format!("did not recognise type_url {}", value))),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct RejectedConfig {
+    name: ResourceId,
+    reason: anyhow::Error,
+}
+impl From<(ResourceId, anyhow::Error)> for RejectedConfig {
+    fn from(context: (ResourceId, anyhow::Error)) -> RejectedConfig {
+        RejectedConfig { name: context.0, reason: context.1 }
+    }
+}
+impl Display for RejectedConfig {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{}: {}", self.name, self.reason)
+    }
+}
+
+#[derive(Error, Debug)]
+pub enum XdsError {
+    #[error("gRPC error ({}): {}", .0.code(), .0.message())]
+    GrpcStatus(#[from] tonic::Status),
+    #[error(transparent)]
+    RequestFailure(#[from] Box<mpsc::error::SendError<DeltaDiscoveryRequest>>),
+    #[error("unknown resource type: {0}")]
+    UnknownResourceType(String),
+    #[error("error decoding xDS payload: {0}")]
+    Decode(#[from] prost::DecodeError),
+    #[error("malformed xDS payload, missing resource")]
+    MissingResource(),
+    #[error("problem occurred during processing: {0}")]
+    InternalProcessingError(String),
+    #[error("cannot construct client: {0}")]
+    BuilderFailed(String),
+}
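+
+// Round-trip sanity sketch: `Display` and `TryFrom<&str>` must stay in sync as
+// resource types are added.
+#[cfg(test)]
+mod tests {
+    use super::TypeUrl;
+
+    #[test]
+    fn type_url_string_roundtrip() {
+        for url in [
+            TypeUrl::Listener,
+            TypeUrl::Cluster,
+            TypeUrl::RouteConfiguration,
+            TypeUrl::ClusterLoadAssignment,
+            TypeUrl::Secret,
+        ] {
+            assert_eq!(TypeUrl::try_from(url.to_string().as_str()).unwrap(), url);
+        }
+    }
+}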
envoy_data_plane_api::envoy::config::core::v3::{address, Address, SocketAddress}; +use envoy_data_plane_api::envoy::config::endpoint::v3::lb_endpoint::HostIdentifier; +use orion_data_plane_api::bootstrap_loader::bootstrap::{BootstrapLoader, BootstrapResolver, XdsConfig, XdsType}; +use orion_data_plane_api::decode::from_yaml; +use orion_data_plane_api::xds::model::TypeUrl; +use std::collections::HashSet; +use std::path::PathBuf; + +#[test] +fn read_static_resource() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + assert_eq!(listener.name, "listener_0".to_string()); + + let routes = loader.get_static_route_configs().unwrap(); + assert_eq!(routes.len(), 0); + + let mut clusters = loader.get_static_cluster_configs().unwrap(); + assert_eq!(clusters.len(), 3); + + let cluster = clusters.drain(..).next().unwrap(); + let orion_data_plane_api::envoy_data_plane_api::envoy::config::cluster::v3::Cluster { load_assignment, .. } = + cluster; + + let endpoints = load_assignment.unwrap().endpoints.drain(..).next().unwrap().lb_endpoints; + let endpoint = endpoints.first().unwrap(); + let Some(HostIdentifier::Endpoint(ref ept_any)) = endpoint.host_identifier else { + panic!("No valid endpoint"); + }; + + let Some(Address { address: ept_addr_any }) = ept_any.clone().address else { + panic!("No valid address from endpoint"); + }; + let Some(address::Address::SocketAddress(ept_socket_addr)) = ept_addr_any else { + panic!("No valid socket address from endpoint"); + }; + assert_eq!(ept_socket_addr.address, "127.0.0.1"); + let Some(PortSpecifier::PortValue(port)) = ept_socket_addr.port_specifier else { + panic!("No valid port value from endpoint"); + }; + assert_eq!(port, 5678); +} + +#[test] +fn read_dynamic_resource() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let xds_configs = loader.get_xds_configs().unwrap(); + assert_eq!(xds_configs.len(), 1); + assert_eq!( + xds_configs[0], + XdsConfig( + XdsType::Individual(TypeUrl::RouteConfiguration), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_string(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5678)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); +} + +#[test] +fn read_ads_config() { + const ADS_BOOTSTRAP: &str = r#" +dynamic_resources: + ads_config: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: ads_cluster + lds_config: + ads: {} + cds_config: + ads: {} +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + clusters: + - name: ads_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive.
+ connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: ads_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5679 + "#; + let bootstrap: Bootstrap = from_yaml(ADS_BOOTSTRAP).unwrap(); + let loader = BootstrapLoader::from(bootstrap); + + let xds_configs = loader.get_xds_configs().unwrap(); + assert_eq!(xds_configs.len(), 1); + + assert_eq!( + xds_configs[0], + XdsConfig( + XdsType::Aggregated(HashSet::from([TypeUrl::Listener, TypeUrl::Cluster])), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_string(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5679)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); +} + +#[test] +fn read_mixture_config() { + const BOOTSTRAP: &str = r#" +dynamic_resources: + ads_config: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: ads_cluster + lds_config: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: lds_cluster + cds_config: + resource_api_version: V3 + ads: {} + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + rds: + route_config_name: local_route + config_source: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: rds_cluster + clusters: + - name: rds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: rds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5679 + - name: lds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. 
+ connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: lds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5677 + - name: ads_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: ads_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5678 + + "#; + let bootstrap: Bootstrap = from_yaml(BOOTSTRAP).unwrap(); + let loader = BootstrapLoader::from(bootstrap); + + let xds_configs = loader.get_xds_configs().unwrap(); + assert_eq!(xds_configs.len(), 3); + + assert_eq!( + xds_configs[0], + XdsConfig( + XdsType::Aggregated(HashSet::from([TypeUrl::Cluster])), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_string(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5678)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); + + assert_eq!( + xds_configs[1], + XdsConfig( + XdsType::Individual(TypeUrl::Listener), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_string(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5677)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); + + assert_eq!( + xds_configs[2], + XdsConfig( + XdsType::Individual(TypeUrl::RouteConfiguration), + SocketAddress { + protocol: 0, + address: "127.0.0.1".to_string(), + ipv4_compat: false, + port_specifier: Some(PortSpecifier::PortValue(5679)), + resolver_name: String::new(), + network_namespace_filepath: String::new(), + } + ) + ); +} diff --git a/orion-data-plane-api/tests/bootstrap_with_dynamic_resource.yml b/orion-data-plane-api/tests/bootstrap_with_dynamic_resource.yml new file mode 100644 index 00000000..53ddb1e1 --- /dev/null +++ b/orion-data-plane-api/tests/bootstrap_with_dynamic_resource.yml @@ -0,0 +1,110 @@ +dynamic_resources: + lds_config: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: lds_cluster + cds_config: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: cds_cluster + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + rds: + route_config_name: local_route + config_source: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: rds_cluster + clusters: + - name: rds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: rds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5679 + - name: cds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: cds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5678 + - name: lds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: lds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5677 diff --git a/orion-data-plane-api/tests/bootstrap_with_http_connection_manager.yml b/orion-data-plane-api/tests/bootstrap_with_http_connection_manager.yml new file mode 100644 index 00000000..6b6df551 --- /dev/null +++ b/orion-data-plane-api/tests/bootstrap_with_http_connection_manager.yml @@ -0,0 +1,105 @@ +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 1234 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: AUTO + rds: + route_config_name: local_route + config_source: + resource_api_version: V3 + api_config_source: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: rds_cluster + clusters: + - name: rds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. 
+ connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: rds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 5678 + - name: cluster_1 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1111 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 2222 + + - name: cluster_2 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + # Configure an HTTP/2 keep-alive to detect connection issues and reconnect + # to the admin server if the connection is no longer responsive. + connection_keepalive: + interval: 30s + timeout: 5s + load_assignment: + cluster_name: cluster_2 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 3333 + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4444 \ No newline at end of file diff --git a/orion-data-plane-api/tests/bootstrap_with_http_filters.yml b/orion-data-plane-api/tests/bootstrap_with_http_filters.yml new file mode 100644 index 00000000..ba1d8486 --- /dev/null +++ b/orion-data-plane-api/tests/bootstrap_with_http_filters.yml @@ -0,0 +1,76 @@ +admin: + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: sending-to-server + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: http_traffic + route_config: + name: local_route + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/books/" + route: + prefix_rewrite: "/" + cluster: some_service + - match: + prefix: "/" + direct_response: + status: 403 + body: + inline_string: "Page does not exist!"
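+ # The http_filters list below mixes a sample custom filter (sample.SampleFilter) with standard Envoy HTTP filters.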
+ http_filters: + - name: sample customized filter + typed_config: + "@type": type.googleapis.com/sample.SampleFilter + key: DATA + val: "123" + - name: bandwidth_limit_filter + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.bandwidth_limit.v3.BandwidthLimit" + stat_prefix: bandwidth_limit + enable_mode: REQUEST_AND_RESPONSE + limit_kbps: 1000 + - name: health_check_filter + ## https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/health_checking + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck" + pass_through_mode: true + headers: + - name: ":path" + exact_match: "/healthz" + cache_time: 30000 + + + + clusters: + - name: some_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1200 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1201 + diff --git a/orion-data-plane-api/tests/bootstrap_with_tcp_proxy.yml b/orion-data-plane-api/tests/bootstrap_with_tcp_proxy.yml new file mode 100644 index 00000000..bcf41030 --- /dev/null +++ b/orion-data-plane-api/tests/bootstrap_with_tcp_proxy.yml @@ -0,0 +1,35 @@ +admin: + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + stat_prefix: destination + cluster: some_service + clusters: + - name: some_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1200 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1201 \ No newline at end of file diff --git a/orion-data-plane-api/tests/bootstrap_with_tls_server.yml b/orion-data-plane-api/tests/bootstrap_with_tls_server.yml new file mode 100644 index 00000000..b3b6d988 --- /dev/null +++ b/orion-data-plane-api/tests/bootstrap_with_tls_server.yml @@ -0,0 +1,20 @@ + +static_resources: + listeners: + - name: listener_https + address: + socket_address: { address: 127.0.0.1, port_value: 8443 } + filterChains: + - transportSocket: + name: envoy.transport_sockets.tls + typedConfig: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + commonTlsContext: + tlsParams: + tlsMinimumProtocolVersion: TLSv1_3 + tlsMaximumProtocolVersion: TLSv1_3 + tlsCertificates: + - certificateChain: + filename: ./tests/server.crt + privateKey: + filename: ./tests/server.key diff --git a/orion-data-plane-api/tests/bootstrap_with_weighted_cluster.yml b/orion-data-plane-api/tests/bootstrap_with_weighted_cluster.yml new file mode 100644 index 00000000..9bca0baa --- /dev/null +++ b/orion-data-plane-api/tests/bootstrap_with_weighted_cluster.yml @@ -0,0 +1,59 @@ +admin: + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + +static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.tcp_proxy + typed_config: + "@type":
type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + stat_prefix: destination + weighted_clusters: + - cluster_weight: + name: some_service_0 + weight: 25 + - cluster_weight: + name: some_service_1 + weight: 75 + clusters: + - name: some_service_0 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1200 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 1201 + - name: some_service_1 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1200 + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1201 \ No newline at end of file diff --git a/orion-data-plane-api/tests/envoy_validation.rs b/orion-data-plane-api/tests/envoy_validation.rs new file mode 100644 index 00000000..9319386b --- /dev/null +++ b/orion-data-plane-api/tests/envoy_validation.rs @@ -0,0 +1,134 @@ +use orion_data_plane_api::bootstrap_loader::bootstrap::{BootstrapLoader, BootstrapResolver}; +use orion_data_plane_api::decode::from_yaml; +use orion_data_plane_api::envoy_validation::{ClusterValidation, FilterChainValidation, FilterValidation, LocalRateLimitValidation}; +use orion_data_plane_api::envoy_data_plane_api::envoy::extensions::filters::network::http_connection_manager::v3::http_connection_manager::CodecType; +use orion_data_plane_api::envoy_data_plane_api::google::protobuf::Duration; +use std::path::PathBuf; + +#[test] +fn yaml_get_downstream_tls_context() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_tls_server.yml"); + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + + let fc = &listener.filter_chains; + assert_eq!(fc.len(), 1); + + let ctx = fc[0].get_downstream_tls_context().unwrap().expect("DownstreamTlsContext is missing"); + assert_eq!(ctx.common_tls_context.unwrap().tls_params.unwrap().tls_minimum_protocol_version, 4); + //tls1.3 +} + +#[test] +fn yaml_get_downstream_tls_context_is_none() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + + let fc = &listener.filter_chains; + assert_eq!(fc.len(), 1); + assert!(fc[0].get_downstream_tls_context().unwrap().is_none()); +} + +#[test] +fn yaml_get_http_connection_manager() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_http_connection_manager.yml"); + let loader = BootstrapLoader::load(path.into_os_string().into_string().unwrap()); + let listeners = loader.get_static_listener_configs().unwrap(); + let listener = listeners.first().unwrap(); + + let fc = &listener.filter_chains; + assert_eq!(fc.len(), 1); + + let _httpman = fc[0].filters[0].get_http_connection_manager().unwrap().expect("HttpConnectionManager is missing"); +} + +#[test] +fn filter_codec_type() { + const INP_FILTER: &str = r#" +name: http_gateway 
+typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1"#; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::listener::v3::Filter; + let filter: Filter = from_yaml(INP_FILTER).unwrap(); + let httpman = filter.get_http_connection_manager().unwrap().unwrap(); + assert_eq!(CodecType::try_from(httpman.codec_type).unwrap().as_str_name(), "HTTP1"); +} + +#[test] +fn cluster_http_proto_options_ext() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_dynamic_resource.yml"); + + const INP_CLUSTER: &str = r#" +name: xds_cluster +connect_timeout: 0.25s +type: STATIC +lb_policy: ROUND_ROBIN +typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + connection_keepalive: + interval: 30s + timeout: 5s +"#; + + use orion_data_plane_api::envoy_data_plane_api::envoy::config::cluster::v3::Cluster; + use orion_data_plane_api::envoy_data_plane_api::envoy::extensions::upstreams::http::v3::http_protocol_options::explicit_http_config::ProtocolConfig; + use orion_data_plane_api::envoy_data_plane_api::envoy::extensions::upstreams::http::v3::http_protocol_options::UpstreamProtocolOptions; + + let cluster: Cluster = from_yaml(INP_CLUSTER).unwrap(); + let proto_opts = cluster.get_http_protocol_options().unwrap().unwrap(); + + let upstream_opts = proto_opts.upstream_protocol_options.unwrap(); + if let UpstreamProtocolOptions::ExplicitHttpConfig(cfg) = upstream_opts { + if let ProtocolConfig::Http2ProtocolOptions(ref h2_opts) = cfg.protocol_config.as_ref().unwrap() { + let ka = h2_opts.connection_keepalive.as_ref().unwrap(); + assert_eq!(ka.interval.as_ref().unwrap(), &Duration { seconds: 30, nanos: 0 }); + assert_eq!(ka.timeout.as_ref().unwrap(), &Duration { seconds: 5, nanos: 0 }); + } else { + panic!("Expecting http2 options, got {:?}", cfg); + } + } else { + panic!("Expecting ExplicitHttpConfig, got {:?}", upstream_opts); + } +} + +#[test] +fn yaml_get_local_ratelimit() { + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests"); + path.push("bootstrap_with_local_ratelimit.yml"); + + const INP_LOCAL_RATELIMIT: &str = r#" + match: {prefix: "/path/with/rate/limit"} + route: {cluster: service_protected_by_rate_limit} + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_ratelimit + token_bucket: + max_tokens: "10000" + tokens_per_fill: "1000" + fill_interval: "5s" +"#; + + use orion_data_plane_api::envoy_data_plane_api::envoy::config::route::v3::Route; + + let route: Route = from_yaml(INP_LOCAL_RATELIMIT).unwrap(); + let local_ratelimit = route.get_local_ratelimit().unwrap().unwrap(); + assert_eq!(local_ratelimit.token_bucket.unwrap().max_tokens, 10000); +} diff --git a/orion-data-plane-api/tests/xds.rs b/orion-data-plane-api/tests/xds.rs new file mode 100644 index 00000000..426f1814 --- /dev/null +++ b/orion-data-plane-api/tests/xds.rs @@ -0,0 +1,443 @@ +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use std::time::Duration; + +use hyper_util::rt::tokio::TokioIo; +use 
orion_data_plane_api::envoy_data_plane_api::envoy::config::cluster::v3::Cluster; +use orion_data_plane_api::envoy_data_plane_api::envoy::service::cluster::v3::cluster_discovery_service_client::ClusterDiscoveryServiceClient; +use orion_data_plane_api::envoy_data_plane_api::envoy::service::cluster::v3::cluster_discovery_service_server::{ + ClusterDiscoveryService, ClusterDiscoveryServiceServer, +}; +use orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::aggregated_discovery_service_client::AggregatedDiscoveryServiceClient; +use orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::aggregated_discovery_service_server::{ + AggregatedDiscoveryService, AggregatedDiscoveryServiceServer, +}; +use orion_data_plane_api::envoy_data_plane_api::tonic; +use orion_data_plane_api::xds::client::DiscoveryClientBuilder; +use tonic::transport::Server; + +use orion_data_plane_api::xds::bindings; +use orion_data_plane_api::xds::model::{TypeUrl, XdsResourceUpdate}; + +use futures::Stream; +use orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::Node; +use orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::{ + DeltaDiscoveryResponse, DiscoveryResponse, Resource, +}; +use orion_data_plane_api::envoy_data_plane_api::google::protobuf::Any; +use orion_data_plane_api::envoy_data_plane_api::prost::Message; +use tokio::sync::{mpsc, Mutex}; +use tokio::time::{self, sleep}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::transport::Uri; +use tonic::{Response, Status}; +use tower::service_fn; +pub struct MockDiscoveryService { + relay: Arc<Mutex<mpsc::Receiver<Result<DeltaDiscoveryResponse, Status>>>>, +} + +#[tonic::async_trait] +impl AggregatedDiscoveryService for MockDiscoveryService { + type StreamAggregatedResourcesStream = Pin<Box<dyn Stream<Item = Result<DiscoveryResponse, Status>> + Send>>; + async fn stream_aggregated_resources( + &self, + _request: tonic::Request< + tonic::Streaming< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, + >, + >, + ) -> std::result::Result<tonic::Response<Self::StreamAggregatedResourcesStream>, tonic::Status> { + unimplemented!("not used by proxy"); + } + + type DeltaAggregatedResourcesStream = Pin<Box<dyn Stream<Item = Result<DeltaDiscoveryResponse, Status>> + Send>>; + async fn delta_aggregated_resources( + &self, + request: tonic::Request< + tonic::Streaming< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, + >, + >, + ) -> std::result::Result<tonic::Response<Self::DeltaAggregatedResourcesStream>, tonic::Status> { + let mut in_stream = request.into_inner(); + let (tx, rx) = mpsc::channel::<Result<DeltaDiscoveryResponse, Status>>(100); + let shared_receiver = self.relay.clone(); + tokio::spawn(async move { + let mut receiver = shared_receiver.lock().await; + 'outer: while let Ok(result) = in_stream.message().await { + match result { + Some(_) => { + while let Some(wrapped_response) = receiver.recv().await { + match tx.send(wrapped_response.clone()).await { + Ok(_) => { + if wrapped_response.is_err() { + break 'outer; + } + }, + _ => { + break 'outer; + }, + } + } + }, + _ => { + break; + }, + } + } + }); + let output_stream = ReceiverStream::new(rx); + Ok(Response::new(Box::pin(output_stream) as Self::DeltaAggregatedResourcesStream)) + } +} + +#[tonic::async_trait] +impl ClusterDiscoveryService for MockDiscoveryService { + type StreamClustersStream = Pin<Box<dyn Stream<Item = Result<DiscoveryResponse, Status>> + Send>>; + async fn stream_clusters( + &self, + _request: tonic::Request< + tonic::Streaming< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, + >, + >, + ) -> std::result::Result<tonic::Response<Self::StreamClustersStream>, tonic::Status> { + unimplemented!("not used by proxy"); + } + + type DeltaClustersStream = Pin<Box<dyn Stream<Item = Result<DeltaDiscoveryResponse, Status>> + Send>>; + async fn delta_clusters(
&self, + request: tonic::Request< + tonic::Streaming< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DeltaDiscoveryRequest, + >, + >, + ) -> std::result::Result<tonic::Response<Self::DeltaClustersStream>, tonic::Status> { + let mut in_stream = request.into_inner(); + let (tx, rx) = mpsc::channel::<Result<DeltaDiscoveryResponse, Status>>(100); + let shared_receiver = self.relay.clone(); + tokio::spawn(async move { + let mut receiver = shared_receiver.lock().await; + 'outer: while let Ok(result) = in_stream.message().await { + match result { + Some(_) => { + while let Some(wrapped_response) = receiver.recv().await { + match tx.send(wrapped_response.clone()).await { + Ok(_) => { + if wrapped_response.is_err() { + break 'outer; + } + }, + _ => { + break 'outer; + }, + } + } + }, + _ => { + break; + }, + } + } + }); + let output_stream = ReceiverStream::new(rx); + Ok(Response::new(Box::pin(output_stream) as Self::DeltaClustersStream)) + } + + async fn fetch_clusters( + &self, + _request: tonic::Request< + orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryRequest, + >, + ) -> std::result::Result< + tonic::Response<orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::DiscoveryResponse>, + tonic::Status, + > { + unimplemented!("not used by proxy"); + } +} + +#[tokio::test] +async fn test_client_operations() { + let node = Node { id: "node-id".to_string(), cluster: "gw-cluster".to_string(), ..Default::default() }; + let cluster = Cluster { name: "cluster-a".to_string(), ..Default::default() }; + let cluster_resource = Resource { + name: cluster.name.clone(), + version: "0.1".to_string(), + resource: Some(Any { + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_string(), + value: cluster.encode_to_vec().into(), + }), + ..Default::default() + }; + let resources = vec![cluster_resource]; + + let initial_response: Result<DeltaDiscoveryResponse, Status> = Ok(DeltaDiscoveryResponse { + resources, + nonce: "abcd".to_string(), + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_string(), + ..Default::default() + }); + + let (server_side_response_tx, server_side_response_rx) = + mpsc::channel::<Result<DeltaDiscoveryResponse, Status>>(100); + + let (client, server) = tokio::io::duplex(1024); + let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; + tokio::spawn(async move { + Server::builder() + .add_service(ClusterDiscoveryServiceServer::new(cds_server)) + .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) + .await + }); + + let mut client = Some(client); + let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") + .expect("failed to init Endpoint") + .connect_with_connector(service_fn(move |_: Uri| { + let client = client.take(); + async move { + if let Some(client) = client { + Ok(TokioIo::new(client)) + } else { + Err(std::io::Error::new(std::io::ErrorKind::Other, "client is already taken")) + } + } + })) + .await; + + let cds_client = ClusterDiscoveryServiceClient::new(channel.unwrap()); + let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; + + let (mut worker, mut client) = + DiscoveryClientBuilder::<bindings::ClusterDiscoveryType>::new(node, typed_binding).build().unwrap(); + + tokio::spawn(async move { + let _status = worker.run().await; + }); + + let _status = server_side_response_tx.send(initial_response).await; + + let _ = client.subscribe("".to_string(), TypeUrl::Cluster).await; + + tokio::select!
{ + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload)) => { + assert_eq!(name, "cluster-a"); + let ack_result = captured_response.ack_channel.send(vec![]); + assert!(ack_result.is_ok(), "failed to acknowledge response"); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(5)) => + panic!("timed out waiting for xds resource over update channel") + } +} + +#[tokio::test] +async fn test_client_resilience() { + let node = Node { id: "node-id".to_string(), cluster: "gw-cluster".to_string(), ..Default::default() }; + let cluster = Cluster { name: "cluster-a".to_string(), ..Default::default() }; + let cluster_resource = Resource { + name: cluster.name.clone(), + version: "0.1".to_string(), + resource: Some(Any { + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_string(), + value: cluster.encode_to_vec().into(), + }), + ..Default::default() + }; + let resources = vec![cluster_resource]; + + let initial_response: Result<DeltaDiscoveryResponse, Status> = Ok(DeltaDiscoveryResponse { + resources, + nonce: "abcd".to_string(), + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_string(), + ..Default::default() + }); + + let (server_side_response_tx, server_side_response_rx) = + mpsc::channel::<Result<DeltaDiscoveryResponse, Status>>(100); + + let (client, server) = tokio::io::duplex(1024); + let cds_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; + + tokio::spawn(async move { + Server::builder() + .add_service(ClusterDiscoveryServiceServer::new(cds_server)) + .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) + .await + }); + + let mut client = Some(client); + let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") + .expect("failed to init Endpoint") + .connect_with_connector_lazy(service_fn(move |_: Uri| { + let client = client.take(); + async move { + if let Some(client) = client { + Ok(TokioIo::new(client)) + } else { + Err(std::io::Error::new(std::io::ErrorKind::Other, "client is already taken")) + } + } + })); + + let cds_client = ClusterDiscoveryServiceClient::new(channel); + let typed_binding = bindings::ClusterDiscoveryType { underlying_client: cds_client }; + + let (mut worker, mut client) = DiscoveryClientBuilder::<bindings::ClusterDiscoveryType>::new(node, typed_binding) + .subscribe_resource_name("cluster-a".to_string()) + .subscribe_resource_name("cluster-b".to_string()) + .build() + .unwrap(); + + tokio::spawn(async move { + let _status = worker.run().await; + }); + let captured_count = AtomicUsize::new(0); + + let _status = server_side_response_tx.send(initial_response.clone()).await; + + tokio::select!
{ + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload)) => { + assert_eq!(name, "cluster-a"); + let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); + assert_eq!( + captured_count.load(Ordering::Relaxed), + 1, + "cluster-a should be captured just once after some time" + ); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(3)) => + panic!("timed out waiting for xds resource over update channel") + } + + let abort_response: Result<DeltaDiscoveryResponse, Status> = + Err(tonic::Status::aborted("kill the stream for testing purposes")); + let _status = server_side_response_tx.send(abort_response).await; + sleep(Duration::from_millis(300)).await; + + let _status = server_side_response_tx.send(initial_response.clone()).await; + sleep(Duration::from_millis(300)).await; + + tokio::select! { + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload)) => { + assert_eq!(name, "cluster-a"); + let _cnt = captured_count.fetch_add(1, Ordering::Relaxed); + assert_eq!( + captured_count.load(Ordering::Relaxed), + 2, + "cluster-a should be captured again after reconnect" + ); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(3)) => + panic!("timed out waiting for xds resource over update channel") + } +} + +#[tokio::test] +async fn test_aggregated_discovery() { + let node = Node { id: "node-id".to_string(), cluster: "gw-cluster".to_string(), ..Default::default() }; + let cluster = Cluster { name: "cluster-a".to_string(), ..Default::default() }; + let cluster_resource = Resource { + name: cluster.name.clone(), + version: "0.1".to_string(), + resource: Some(Any { + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_string(), + value: cluster.encode_to_vec().into(), + }), + ..Default::default() + }; + let resources = vec![cluster_resource]; + + let initial_response: Result<DeltaDiscoveryResponse, Status> = Ok(DeltaDiscoveryResponse { + resources, + nonce: "abcd".to_string(), + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_string(), + ..Default::default() + }); + + let (server_side_response_tx, server_side_response_rx) = + mpsc::channel::<Result<DeltaDiscoveryResponse, Status>>(100); + + let (client, server) = tokio::io::duplex(1024); + let ads_server = MockDiscoveryService { relay: Arc::new(Mutex::new(server_side_response_rx)) }; + tokio::spawn(async move { + Server::builder() + .add_service(AggregatedDiscoveryServiceServer::new(ads_server)) + .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server))) + .await + }); + + let mut client = Some(client); + let channel = tonic::transport::Endpoint::try_from("http://[::]:50051") + .expect("failed to init Endpoint") + .connect_with_connector(service_fn(move |_: Uri| { + let client = client.take(); + async move { + if let Some(client) = client { + Ok(TokioIo::new(client)) + } else { + Err(std::io::Error::new(std::io::ErrorKind::Other, "client is already taken")) + } + } + })) + .await + .unwrap(); + + let ads_client = AggregatedDiscoveryServiceClient::new(channel.clone()); + let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; + + let client = DiscoveryClientBuilder::<bindings::AggregatedDiscoveryType>::new(node.clone(), typed_binding) + .subscribe_resource_name("my-cluster".to_string()) + .build(); + assert!(client.is_err(), "cannot subscribe to resources without a type_url for ADS"); + + let ads_client =
AggregatedDiscoveryServiceClient::new(channel); + let typed_binding = bindings::AggregatedDiscoveryType { underlying_client: ads_client }; + + let (mut worker, mut client) = + DiscoveryClientBuilder::<bindings::AggregatedDiscoveryType>::new(node, typed_binding) + .subscribe_resource_name_by_typeurl("cluster-a".to_string(), TypeUrl::Cluster) + .subscribe_resource_name_by_typeurl("cluster-z".to_string(), TypeUrl::Cluster) + .subscribe_resource_name_by_typeurl("endpoints-a".to_string(), TypeUrl::ClusterLoadAssignment) + .subscribe_resource_name_by_typeurl("secret-config-a".to_string(), TypeUrl::Secret) + .build() + .unwrap(); + + tokio::spawn(async move { + let _status = worker.run().await; + }); + + let _status = server_side_response_tx.send(initial_response).await; + + let _ = client.subscribe("".to_string(), TypeUrl::Cluster).await; + + tokio::select! { + Some(captured_response) = client.recv() => { + match captured_response.updates.first() { + Some(XdsResourceUpdate::Update(name, _payload)) => { + assert_eq!(name, "cluster-a"); + } + _ => panic!("failed to receive config update from xDS") + } + } + _ = time::sleep(Duration::from_secs(5)) => + panic!("timed out waiting for xds resource over update channel") + } +} diff --git a/orion-error/Cargo.toml b/orion-error/Cargo.toml new file mode 100644 index 00000000..3048a47c --- /dev/null +++ b/orion-error/Cargo.toml @@ -0,0 +1,7 @@ +[package] +edition = "2021" +name = "orion-error" +version = "0.1.0" + +[lints] +workspace = true diff --git a/orion-error/src/lib.rs b/orion-error/src/lib.rs new file mode 100644 index 00000000..8feabbce --- /dev/null +++ b/orion-error/src/lib.rs @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::{ + borrow::Cow, + error::Error as ErrorTrait, + fmt::{Debug, Display}, + ops::{Deref, DerefMut}, + result::Result as StdResult, +}; +type BoxedErr = Box<dyn ErrorTrait + Send + Sync>; + +// We define two types here, Error and ErrorImpl, because we want our exported Error type to both +// implement the Error trait and implement From for any underlying error type. +// Unfortunately, these traits can't be implemented simultaneously, as that From impl would conflict +// with the blanket From<T> for T impl in core. +// +// By introducing two types we can work around this: Error implements From but not ErrorTrait, +// and ErrorImpl implements ErrorTrait but not From. +// Additionally, Error derefs to ErrorImpl (via Deref/DerefMut), through which it still inherits the trait methods defined on +// ErrorImpl.
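+// +// A minimal usage sketch (illustrative only; the function and message are hypothetical, +// the traits are the ones defined below): +// +// use orion_error::{Result, ResultExtension}; +// +// fn read_file(path: &str) -> Result<String> { +// // std::io::Error converts into Error via the blanket From impl below; +// // context() wraps it with a human-readable message. +// std::fs::read_to_string(path).context("failed to read file") +// } +//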
+pub struct Error(ErrorImpl); +pub type Result<T> = StdResult<T, Error>; + +impl Debug for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + <ErrorImpl as Debug>::fmt(&self.0, f) + } +} + +impl Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + <ErrorImpl as Display>::fmt(&self.0, f) + } +} + +impl Error { + #[must_use] + pub fn context<T: Into<Cow<'static, str>>>(self, msg: T) -> Self { + Self(ErrorImpl::Msg(msg.into(), self.0.into())) + } + + pub fn inner(self) -> impl ErrorTrait + Send + Sync + 'static { + self.0 + } +} + +impl AsRef<(dyn ErrorTrait + Send + Sync + 'static)> for Error { + fn as_ref(&self) -> &(dyn ErrorTrait + Send + Sync + 'static) { + &self.0 + } +} + +impl Deref for Error { + type Target = dyn ErrorTrait + 'static; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Error { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +// allows the use of the try operator (`?`) on Results in functions returning `Result` +impl<E: Into<BoxedErr>> From<E> for Error { + fn from(value: E) -> Self { + Self(ErrorImpl::Error(value.into())) + } +} + +enum ErrorImpl { + /// an error without any context attached + Error(BoxedErr), + // an error message with a parent context + Msg(Cow<'static, str>, BoxedErr), +} + +impl ErrorTrait for ErrorImpl { + fn source(&self) -> Option<&(dyn ErrorTrait + 'static)> { + match self { + Self::Error(err) => err.source(), + Self::Msg(_, err) => Some(err.as_ref()), + } + } +} + +impl Debug for ErrorImpl { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Error(err) => <BoxedErr as Debug>::fmt(err, f), + Self::Msg(msg, _) => f.write_str(msg), + }?; + + if let Some(first_source) = self.source() { + let mut level = 0; + //only print the level if there's at least 2 sources + let print_level = first_source.source().is_some(); + f.write_str("\n\ncaused by:")?; + let mut next_source = Some(first_source); + while let Some(source) = next_source { + if print_level { + f.write_fmt(format_args!("\n{level: >4}: {source}")) + } else { + f.write_fmt(format_args!("\n {source}")) + }?; + next_source = source.source(); + level += 1; + } + } + Ok(()) + } +} + +impl Display for ErrorImpl { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Msg(msg, _) => <Cow<'static, str> as Display>::fmt(msg, f), + Self::Error(err) => <BoxedErr as Display>::fmt(err, f), + } + } +} + +// allows for doing error.context("some description") on any error +pub trait ErrorExtension { + #[must_use] + fn context<T: Into<Cow<'static, str>>>(self, context: T) -> Error; +} + +impl<E: ErrorTrait + Send + Sync + 'static> ErrorExtension for E { + fn context<T: Into<Cow<'static, str>>>(self, msg: T) -> Error { + Error(ErrorImpl::Msg(msg.into(), self.into())) + } +} + +// allows for doing error.context("some description") on results +pub trait ResultExtension { + type T; + fn context<Msg: Into<Cow<'static, str>>>(self, context: Msg) -> Result<Self::T>; + fn with_context<F: FnOnce() -> Msg, Msg: Into<Cow<'static, str>>>(self, context_fn: F) -> Result<Self::T>; +} + +impl<T, E: ErrorExtension> ResultExtension for StdResult<T, E> { + type T = T; + fn context<Msg: Into<Cow<'static, str>>>(self, context: Msg) -> Result<T> { + self.map_err(|e| e.context(context)) + } + fn with_context<F: FnOnce() -> Msg, Msg: Into<Cow<'static, str>>>(self, context_fn: F) -> Result<T> { + self.map_err(|e| e.context(context_fn())) + } +} + +// Error does not implement the ErrorTrait, so the previous impl does not apply to it +impl<T> ResultExtension for Result<T> { + type T = T; + fn context<Msg: Into<Cow<'static, str>>>(self, context: Msg) -> Result<T> { + self.map_err(|e| e.context(context)) + } + fn with_context<F: FnOnce() -> Msg, Msg: Into<Cow<'static, str>>>(self, context_fn: F) -> Result<T> { + self.map_err(|e| e.context(context_fn())) + } +} diff --git a/orion-lib/Cargo.toml b/orion-lib/Cargo.toml new file mode
100644 index 00000000..bab0acbd --- /dev/null +++ b/orion-lib/Cargo.toml @@ -0,0 +1,67 @@ +[package] +edition = "2021" +name = "orion-lib" +publish = ["rust-inhuawei-com"] +version = "0.1.0" + +[dependencies] +abort-on-drop.workspace = true +async-stream = "0.3" +atomic-time = "0.1.4" +bytes.workspace = true +compact_str.workspace = true +enum_dispatch = "0.3.13" +exponential-backoff.workspace = true +futures.workspace = true +futures-util = { version = "0.3", default-features = false } +h2 = "0.4.5" +hickory-resolver = { version = "0.24", features = ["system-config"] } +http.workspace = true +http-body.workspace = true +http-body-util.workspace = true +hyper = { version = "1", features = ["full"] } +hyper-rustls = { version = "0.27.1", features = ["default", "http2"] } +hyper-util.workspace = true +ipnet = "2.9" +once_cell = { version = "1.19" } +orion-configuration.workspace = true +orion-error.workspace = true +orion-xds.workspace = true +parking_lot = "0.12.3" +pin-project = "1.1.5" +pingora-timeout = "0.3.0" +pretty-duration = "0.1.1" +rand = { version = "0.8.5", features = ["small_rng"] } +regex.workspace = true +rustc-hash = "2.1.1" +rustls = "0.23" +rustls-pemfile = "2.1" +rustls-platform-verifier = { version = "0.3" } +rustls-webpki = "0.102" +serde.workspace = true +thiserror.workspace = true +thread_local = "1.1.8" +tokio.workspace = true +tokio-rustls = "0.26" +tokio-stream.workspace = true +tower.workspace = true +tower-service = "0.3.2" +tracing.workspace = true +twox-hash = "1.6.3" +typed-builder = "0.18.2" +url.workspace = true +x509-parser = { version = "0.16", features = ["default"] } + +[dev-dependencies] +orion-data-plane-api.workspace = true +serde_yaml.workspace = true +tracing-test.workspace = true + +[features] +# making sure we are not using pure ring +aws_lc_rs = ["rustls/aws_lc_rs"] +default = ["aws_lc_rs"] + + +[lints] +workspace = true diff --git a/orion-lib/README.md b/orion-lib/README.md new file mode 100644 index 00000000..69c7d7a8 --- /dev/null +++ b/orion-lib/README.md @@ -0,0 +1,68 @@ +## What does it do +It is more Envoy. + +# Running +Default bootstrap config: +> +> ### Listeners: +> - server-1-tls ----> domains [127.0.0.1 or wildcard ] ----> path / -----> cluster selector local\_cluster +> - server-1-tls ----> domains [127.0.0.1 or wildcard ] ----> path /proxy ----> cluster selector remote\_cluster +> - server-2-plaintext ----> routes [wildcard ] ----> path / -----> cluster selector local\_cluster +> - server-2-plaintext ----> routes [wildcard ] ----> path /ignored -----> cluster selector local\_cluster +> +> ### Clusters: +> - cluster local-srv/local\_cluster STATIC load balance endpoints [127.0.0.1:5000, 127.0.0.1:4000] +> - cluster remote-srv/local\_cluster STATIC load balance endpoints [127.0.0.1:5000, 127.0.0.1:4000] +> - cluster proxy-srv/remote\_cluster STATIC load balance endpoints [127.0.0.1:6000] +> +> + + +## Generate self-signed certs: +```bash +openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout self_signed_certs/key.pem -out self_signed_certs/cert.pem +``` + +## Run downstream/backend servers + +```bash +cd ng3_proxy/tools/ +cd simple_server +cargo build --release +cd ..
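+# release_start_clients.sh (assumed helper in tools/) starts the sample backend servers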
+./release_start_clients.sh +``` + +## Run proxy +```bash +cargo run -p orion-proxy -- --config orion-proxy/conf/orion-runtime.yaml --with-envoy-bootstrap orion-proxy/conf/orion-bootstrap.yaml + +``` + +## Run upstream/client +```bash +curl -k -H "host: http://127.0.0.1" http://127.0.0.1:8000 +curl -kvi https://127.0.0.1:8000/ +curl -vki http://127.0.0.1:8001/ + +curl -vik --resolve example.com:8443:127.0.0.1 https://example.com:8443/proxy +curl -vik --resolve dublin_1.irc.huawei.com:8443:127.0.0.1 https://dublin_1.irc.huawei.com:8443 +curl -vik --resolve dublin_1.beefcake.com:8443:127.0.0.1 https://dublin_1.beefcake.com:8443 + +curl -vik --resolve dublin_1.irc.huawei.com:9443:127.0.0.1 https://dublin_1.irc.huawei.com:9443 +curl -vik --resolve dublin_1.beefcake.com:9443:127.0.0.1 https://dublin_1.beefcake.com:9443 + + + +curl -vik --cacert test_certs/beefcake.intermediate.ca-chain.cert.pem --cert test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem --key test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem --resolve dublin_1.beefcake.com:9443:127.0.0.1 https://dublin_1.beefcake.com:9443 + +curl -vik --cacert test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem --cert test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem --key test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem --resolve dublin_1.beefcake.com:9443:127.0.0.1 https://dublin_1.beefcake.com:9443 + +curl -vi --cacert test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem --cert test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem --key test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem --resolve athlone_2.beefcake.com:8443:127.0.0.1 https://athlone_2.beefcake.com:8443 + + +curl -vik -H "xd-req-remove-route: dummy" -H "xd-req-remove-vh: dummy" -H "xd-resp-pass-vh: dummy" -H "xd-req-pass-vh: dummy" --resolve example.com:8443:127.0.0.1 https://example.com:8443/proxy + + +# this will return an error directly from the simple_server, since the simple server doesn't understand TLS +curl -vik -H "xd-req-remove-route: dummy" -H "xd-req-remove-vh: dummy" -H "xd-resp-pass-vh: dummy" -H "xd-req-pass-vh: dummy" --resolve blah.com:8001:127.0.0.1 https://blah.com:8001 diff --git a/orion-lib/src/body/mod.rs b/orion-lib/src/body/mod.rs new file mode 100644 index 00000000..89b93709 --- /dev/null +++ b/orion-lib/src/body/mod.rs @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +//! Middleware that applies a timeout to requests. +//! +//! If the request does not complete within the specified timeout it will be aborted and a `408 +//! Request Timeout` response will be sent. +//! +//! # Differences from `tower::timeout` +//! +//! tower's [`Timeout`](tower::timeout::Timeout) middleware uses an error to signal timeout, i.e. +//! it changes the error type to [`BoxError`](tower::BoxError).
For HTTP services that is rarely +//! what you want, as returning errors will terminate the connection without sending a response. +//! +//! This middleware won't change the error type and will instead return a `408 Request Timeout` +//! response. That means if your service's error type is [`Infallible`] it will still be +//! [`Infallible`] after applying this middleware. +//! + +pub mod poly_body; +pub mod timeout_body; diff --git a/orion-lib/src/body/poly_body.rs b/orion-lib/src/body/poly_body.rs new file mode 100644 index 00000000..9bc19674 --- /dev/null +++ b/orion-lib/src/body/poly_body.rs @@ -0,0 +1,119 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use super::timeout_body::{TimeoutBody, TimeoutBodyError}; +use bytes::Bytes; +use http_body_util::{Empty, Full}; +use hyper::body::{Body, Incoming}; +use orion_xds::grpc_deps::{GrpcBody, Status as GrpcError}; +use pin_project::pin_project; + +#[pin_project(project = PolyBodyProj)] +pub enum PolyBody { + Empty(#[pin] Empty<Bytes>), + Full(#[pin] Full<Bytes>), + Incoming(#[pin] Incoming), + Timeout(#[pin] TimeoutBody<Incoming>), + Grpc(#[pin] GrpcBody), +} + +impl Default for PolyBody { + #[inline] + fn default() -> Self { + PolyBody::Empty(Empty::<Bytes>::default()) + } +} + +#[derive(thiserror::Error, Debug)] +pub enum PolyBodyError { + #[error(transparent)] + Hyper(#[from] hyper::Error), + #[error(transparent)] + Infallible(#[from] std::convert::Infallible), + #[error(transparent)] + Grpc(#[from] GrpcError), + #[error(transparent)] + Boxed(#[from] Box<dyn std::error::Error + Send + Sync>), + #[error("data was not received within the designated timeout")] + TimedOut, +} + +//hyper::Error is the error type returned by incoming +impl From<TimeoutBodyError<hyper::Error>> for PolyBodyError { + fn from(value: TimeoutBodyError<hyper::Error>) -> Self { + match value { + TimeoutBodyError::TimedOut => Self::TimedOut, + TimeoutBodyError::BodyError(e) => Self::Hyper(e), + } + } +} + +impl Body for PolyBody { + type Data = Bytes; + type Error = PolyBodyError; + + fn poll_frame( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Option<Result<hyper::body::Frame<Self::Data>, Self::Error>>> { + match self.project() { + PolyBodyProj::Empty(e) => e.poll_frame(cx).map_err(Into::into), + PolyBodyProj::Full(f) => f.poll_frame(cx).map_err(Into::into), + PolyBodyProj::Incoming(i) => i.poll_frame(cx).map_err(Into::into), + PolyBodyProj::Timeout(t) => t.poll_frame(cx).map_err(Into::into), + PolyBodyProj::Grpc(g) => g.poll_frame(cx).map_err(Into::into), + } + } +} + +impl From<Empty<Bytes>> for PolyBody { + #[inline] + fn from(body: Empty<Bytes>) -> Self { + PolyBody::Empty(body) + } +} + +impl From<Full<Bytes>> for PolyBody { + #[inline] + fn from(body: Full<Bytes>) -> Self { + PolyBody::Full(body) + } +} + +impl From<Incoming> for PolyBody { + #[inline] + fn from(body: Incoming) -> Self { + PolyBody::Incoming(body) + } +} + +impl From<TimeoutBody<Incoming>> for PolyBody { + #[inline] + fn from(body: TimeoutBody<Incoming>) -> Self {
PolyBody::Timeout(body) + } +} + +impl From<GrpcBody> for PolyBody { + #[inline] + fn from(body: GrpcBody) -> Self { + PolyBody::Grpc(body) + } +} diff --git a/orion-lib/src/body/timeout_body.rs b/orion-lib/src/body/timeout_body.rs new file mode 100644 index 00000000..3d8ddbef --- /dev/null +++ b/orion-lib/src/body/timeout_body.rs @@ -0,0 +1,169 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +/// Middleware that applies a timeout to request and response bodies. +/// +/// Wrapper around a [`http_body::Body`] to time out if data is not ready within the specified duration. +/// +/// Bodies must produce data at most within the specified timeout. +/// If the body does not produce a requested data frame within the timeout period, it will return an error. +/// +/// This `TimeoutBody` variant differs from `tower_http::timeout::TimeoutBody` in two ways: +/// 1. Unpin: The original `TimeoutBody` is !Unpin, while this version is Unpin to enable use in certain asynchronous contexts. +/// 2. Optional Timeout: The timeout is wrapped in `Option`, allowing for cases where a timeout may not be necessary. +/// +use http_body::Body; +use pin_project::pin_project; +use pingora_timeout::fast_timeout::{fast_timeout, FastTimeout}; +use pingora_timeout::Timeout; +use std::{ + future::{pending, Future, Pending}, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; + +#[pin_project] +pub struct TimeoutBody<B> { + timeout: Option<Duration>, + #[pin] + sleep: Option<Pin<Box<Timeout<Pending<()>, FastTimeout>>>>, + #[pin] + body: B, +} + +impl<B> TimeoutBody<B> { + /// Creates a new [`TimeoutBody`]. + pub fn new(timeout: Option<Duration>, body: B) -> Self { + TimeoutBody { timeout, sleep: None, body } + } +} + +impl<B> Body for TimeoutBody<B> +where + B: Body, + B::Error: std::error::Error, +{ + type Data = B::Data; + type Error = TimeoutBodyError<B::Error>; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<Option<Result<http_body::Frame<Self::Data>, Self::Error>>> { + let mut this = self.project(); + if let Some(timeout) = this.timeout { + // Start the `Sleep` if not active. + let sleep_pinned = if let Some(some) = this.sleep.as_mut().as_pin_mut() { + some + } else { + Pin::new(this.sleep.insert(Box::pin(fast_timeout(*timeout, pending())))) + }; + + // Error if the timeout has expired. + if let Poll::Ready(_) = sleep_pinned.poll(cx) { + return Poll::Ready(Some(Err(TimeoutBodyError::TimedOut))); + } + + // Check for body data. + let frame = ready!(this.body.poll_frame(cx)); + + // A frame is ready. Reset the `Sleep`... + this.sleep.set(None); + + Poll::Ready(frame.transpose().map_err(TimeoutBodyError::BodyError).transpose()) + } else { + this.body.poll_frame(cx).map_err(TimeoutBodyError::BodyError) + } + } +} + +/// Error for [`TimeoutBody`].
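+/// +/// `TimedOut` is returned when the inner body fails to produce a frame within the +/// configured duration; `BodyError` transparently wraps the inner body's own error type.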
+#[derive(thiserror::Error, Debug)] +pub enum TimeoutBodyError { + #[error("data was not received within the designated timeout")] + TimedOut, + #[error(transparent)] + BodyError(E), +} + +#[cfg(test)] +mod tests { + use super::*; + + use bytes::Bytes; + use http_body::Frame; + use http_body_util::BodyExt; + use pin_project::pin_project; + use std::{error::Error, fmt::Display}; + use tokio::time::{sleep, Sleep}; + + #[derive(Debug)] + struct MockError; + + impl Error for MockError {} + + impl Display for MockError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "mock error") + } + } + + #[pin_project] + struct MockBody { + #[pin] + sleep: Sleep, + } + + impl Body for MockBody { + type Data = Bytes; + type Error = MockError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + this.sleep.poll(cx).map(|()| Some(Ok(Frame::data(vec![].into())))) + } + } + + #[tokio::test] + async fn test_body_available_within_timeout() { + let mock_sleep = Duration::from_secs(1); + let timeout_sleep = Duration::from_secs(2); + + let mock_body = MockBody { sleep: sleep(mock_sleep) }; + let timeout_body = TimeoutBody::new(Some(timeout_sleep), mock_body); + + assert!(timeout_body.boxed_unsync().frame().await.expect("no frame").is_ok()); + } + + #[tokio::test] + async fn test_body_unavailable_within_timeout_error() { + let mock_sleep = Duration::from_secs(2); + let timeout_sleep = Duration::from_secs(1); + + let mock_body = MockBody { sleep: sleep(mock_sleep) }; + let timeout_body = TimeoutBody::new(Some(timeout_sleep), mock_body); + + assert!(timeout_body.boxed_unsync().frame().await.unwrap().is_err()); + } +} diff --git a/orion-lib/src/clusters/balancers/default_balancer.rs b/orion-lib/src/clusters/balancers/default_balancer.rs new file mode 100644 index 00000000..81138a43 --- /dev/null +++ b/orion-lib/src/clusters/balancers/default_balancer.rs @@ -0,0 +1,582 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
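// Illustrative sketch (not part of this patch): consuming a body wrapped in
// the `TimeoutBody` defined above. The helper name `read_with_frame_timeout`
// is hypothetical; it assumes a tokio runtime drives the future.
use bytes::Bytes;
use http_body_util::{BodyExt, Full};
use std::time::Duration;

async fn read_with_frame_timeout() -> Result<usize, Box<dyn std::error::Error>> {
    let body = Full::new(Bytes::from_static(b"hello"));
    // Every frame must be produced within 5 seconds; `None` would disable the
    // timeout entirely.
    let mut timed = crate::body::timeout_body::TimeoutBody::new(Some(Duration::from_secs(5)), body);
    let mut total = 0;
    while let Some(frame) = timed.frame().await {
        // A slow producer surfaces as `TimeoutBodyError::TimedOut` here.
        if let Some(data) = frame?.data_ref() {
            total += data.len();
        }
    }
    Ok(total)
}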
+// +// + +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; + +use http::uri::Authority; +use rustc_hash::FxHashMap as HashMap; +use tracing::debug; + +use super::{ + healthy::HealthyBalancer, + priority::{Priority, PriorityInfo}, + wrr::{self, WeightedRoundRobinBalancer}, + Balancer, +}; +use crate::{ + clusters::{ + health::{EndpointHealth, HealthStatus, ValueUpdated}, + load_assignment::{LbEndpoint, LocalityLbEndpoints}, + }, + Result, +}; + +pub trait WeightedEndpoint { + fn weight(&self) -> u32; +} + +pub trait EndpointWithLoad { + fn http_load(&self) -> u32; +} + +pub trait EndpointWithAuthority { + fn authority(&self) -> &Authority; +} + +#[derive(Clone, Debug)] +pub struct LbItem { + pub item: Arc, + pub weight: u32, +} + +impl LbItem { + pub fn new(weight: u32, item: Arc) -> Self { + Self { item, weight } + } +} + +#[derive(Debug, Clone)] +pub struct DefaultBalancer +where + B: Balancer, +{ + priority_level_lb: WeightedRoundRobinBalancer, + priorities: HashMap>>, + _type: PhantomData, +} + +impl DefaultBalancer +where + B: Balancer + FromIterator>, + E: WeightedEndpoint, +{ + pub fn update_health(&mut self, id: &E, health: HealthStatus) -> Result + where + E: Clone + Debug + PartialEq, + { + for priority_info in self.priorities.values_mut() { + if let Ok(updated) = priority_info.balancer.update_health(id, health) { + if updated == ValueUpdated::NotUpdated { + return Ok(ValueUpdated::NotUpdated); + } + + if health.is_healthy() { + priority_info.healthy += 1; + } else { + priority_info.healthy -= 1; + } + + self.priority_level_lb = Self::recalculate_priority_load_factors(&self.priorities); + return Ok(ValueUpdated::Updated); + } + } + Err(format!("Can't find endpoint {id:?}").into()) + } + + fn recalculate_priority_load_factors( + priorities: &HashMap>>, + ) -> WeightedRoundRobinBalancer { + let priority_load_weights = Priority::calculate_priority_loads(priorities); + let items = priority_load_weights.into_iter().map(|f| wrr::LbItem::new(f.1, Arc::new(f.0))); + WeightedRoundRobinBalancer::new(items) + } +} + +impl Balancer for DefaultBalancer +where + B: Debug + Balancer + FromIterator>, + E: Debug + WeightedEndpoint, +{ + fn next_item(&mut self, hash: Option) -> Option> { + let priority = self.priority_level_lb.next_item(None); + debug!("Selecting priority {priority:?} based on {:?}", self.priority_level_lb); + let priority = priority?; + let priority_info = self.priorities.get_mut(&priority)?; + let endpoint = priority_info.balancer.next_item(hash); + debug!("Selecting endpoint {endpoint:?} based on {priority_info:?}"); + let endpoint = endpoint?; + Some(endpoint) + } +} + +impl DefaultBalancer +where + B: Balancer + FromIterator> + Default, +{ + pub fn from_slice(endpoints: &[LocalityLbEndpoints]) -> Self { + let mut priorities = HashMap::default(); + for endpoint in endpoints { + let total = endpoint.total_endpoints; + let healthy = endpoint.healthy_endpoints; + priorities + .entry(endpoint.priority) + .and_modify(|priority_info: &mut PriorityInfo>| { + priority_info.healthy += endpoint.healthy_endpoints; + priority_info.total += endpoint.total_endpoints; + priority_info.balancer.extend(endpoint.endpoints.iter().cloned()); + debug!("Priority info {} {} {}", endpoint.priority, priority_info.healthy, priority_info.total); + }) + .or_insert(PriorityInfo { balancer: endpoint.endpoints.iter().cloned().collect(), healthy, total }); + } + + Self { priority_level_lb: Self::recalculate_priority_load_factors(&priorities), priorities, _type: PhantomData } + } +} + +#[cfg(test)] +mod 
test { + use compact_str::ToCompactString; + use std::sync::Arc; + + use rustls::ClientConfig; + + use super::DefaultBalancer; + use crate::{ + clusters::{ + balancers::{wrr::WeightedRoundRobinBalancer, Balancer}, + health::HealthStatus, + load_assignment::{LbEndpoint, LocalityLbEndpoints}, + }, + secrets::{TlsConfigurator, WantsToBuildClient}, + }; + type TestpointData = (u32, u32, Vec<(http::uri::Authority, u32, HealthStatus)>); + + fn get_locality_endpoints(data: Vec) -> Vec { + let mut loc_lb_endpoints = vec![]; + for (_, priority, endpoints) in data { + let mut lb_endpoints = vec![]; + let len = endpoints.len(); + let mut healthy = 0; + for (auth, weight, health_status) in endpoints { + if health_status == HealthStatus::Healthy { + healthy += 1; + } + lb_endpoints.push(Arc::new(LbEndpoint::new(auth, None, weight, health_status))); + } + + loc_lb_endpoints.push(LocalityLbEndpoints { + name: "Cluster1".to_compact_string(), + endpoints: lb_endpoints, + priority, + healthy_endpoints: healthy, + total_endpoints: u32::try_from(len).expect("Too many endpoints"), + tls_configurator: Option::>::None, + http_protocol_options: Default::default(), + connection_timeout: None, + }); + } + loc_lb_endpoints + } + + #[test] + fn test_default_loadbalancer_with_wrr_and_all_healthy_priority_not_contigous() { + let data = vec![ + ( + 1, + 1, + vec![ + ("endpoint11:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint12:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 1, + 3, + vec![ + ("endpoint21:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint22:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 1, + 5, + vec![ + ("endpoint31:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint32:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ]; + + let endpoints = get_locality_endpoints(data); + let mut default_balancer: DefaultBalancer, LbEndpoint> = + DefaultBalancer::from_slice(&endpoints); + let mut results = vec![]; + for _ in 0..10 { + let next = default_balancer.next_item(None); + println!("{next:?}"); + results.push(next); + } + + let results: Vec<_> = results.into_iter().filter_map(|r| r.map(|f| f.authority.to_string())).collect(); + let expected = [ + "endpoint11:8000", + "endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_default_loadbalancer_with_wrr_and_all_healthy_one_priority() { + let data = vec![ + ( + 1, + 0, + vec![ + ("endpoint11:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint12:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 1, + 1, + vec![ + ("endpoint21:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint22:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 1, + 2, + vec![ + ("endpoint31:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint32:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ]; + + let endpoints = get_locality_endpoints(data); + let mut default_balancer: DefaultBalancer, LbEndpoint> = + DefaultBalancer::from_slice(&endpoints); + let mut results = vec![]; + for _ in 0..10 { + let next = default_balancer.next_item(None); + println!("{next:?}"); + results.push(next); + } + + let results: Vec<_> = results.into_iter().filter_map(|r| r.map(|f| f.authority.to_string())).collect(); + let expected = [ + "endpoint11:8000", + 
"endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint11:8000", + "endpoint12:8000", + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_default_loadbalancer_with_wrr_and_all_healthy_one_priority_two_groups() { + let data = vec![ + ( + 1, + 0, + vec![ + ("endpoint11:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint12:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 1, + 0, + vec![ + ("endpoint21:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint22:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 1, + 2, + vec![ + ("endpoint31:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint32:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ]; + + let endpoints = get_locality_endpoints(data); + let mut default_balancer: DefaultBalancer, LbEndpoint> = + DefaultBalancer::from_slice(&endpoints); + let mut results = vec![]; + for _ in 0..10 { + let next = default_balancer.next_item(None); + println!("{next:?}"); + results.push(next); + } + + let results: Vec<_> = results.into_iter().filter_map(|r| r.map(|f| f.authority.to_string())).collect(); + let expected = [ + "endpoint11:8000", + "endpoint12:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint11:8000", + "endpoint12:8000", + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_default_loadbalancer_with_wrr_and_all_healthy_one_priority_two_different_groups() { + let data = vec![ + ( + 1, + 0, + vec![ + ("endpoint11:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint12:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 2, + 0, + vec![ + ("endpoint21:8000".parse().unwrap(), 2, HealthStatus::Healthy), + ("endpoint22:8000".parse().unwrap(), 2, HealthStatus::Healthy), + ], + ), + ( + 1, + 2, + vec![ + ("endpoint31:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint32:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ]; + + let endpoints = get_locality_endpoints(data); + let mut default_balancer: DefaultBalancer, LbEndpoint> = + DefaultBalancer::from_slice(&endpoints); + let mut results = vec![]; + for _ in 0..10 { + let next = default_balancer.next_item(None); + println!("{next:?}"); + results.push(next); + } + + let results: Vec<_> = results.into_iter().filter_map(|r| r.map(|f| f.authority.to_string())).collect(); + let expected = [ + "endpoint21:8000", + "endpoint22:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint11:8000", + "endpoint12:8000", + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_default_loadbalancer_with_wrr_and_all_healthy_one_priority_two_different_groups_2() { + let data = vec![ + ( + 1, + 0, + vec![ + ("endpoint11:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint12:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 2, + 0, + vec![ + ("endpoint21:8000".parse().unwrap(), 4, HealthStatus::Healthy), + ("endpoint22:8000".parse().unwrap(), 2, HealthStatus::Healthy), + ], + ), + ( + 1, + 2, + vec![ + ("endpoint31:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint32:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ]; + + let endpoints = get_locality_endpoints(data); + let mut default_balancer: DefaultBalancer, LbEndpoint> = + 
DefaultBalancer::from_slice(&endpoints); + let mut results = vec![]; + for _ in 0..10 { + let next = default_balancer.next_item(None); + println!("{next:?}"); + results.push(next); + } + + let results: Vec<_> = results.into_iter().filter_map(|r| r.map(|f| f.authority.to_string())).collect(); + let expected = [ + "endpoint21:8000", + "endpoint22:8000", + "endpoint21:8000", + "endpoint11:8000", + "endpoint12:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint21:8000", + "endpoint21:8000", + "endpoint22:8000", + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_default_loadbalancer_with_wrr_and_unhealthy_two_priority_groups() { + let data = vec![ + ( + 1, + 0, + vec![ + ("endpoint11:8000".parse().unwrap(), 1, HealthStatus::Unhealthy), + ("endpoint12:8000".parse().unwrap(), 1, HealthStatus::Unhealthy), + ], + ), + ( + 2, + 1, + vec![ + ("endpoint21:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint22:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ( + 1, + 2, + vec![ + ("endpoint31:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint32:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ]; + + let endpoints = get_locality_endpoints(data); + let mut default_balancer: DefaultBalancer, LbEndpoint> = + DefaultBalancer::from_slice(&endpoints); + let mut results = vec![]; + for _ in 0..10 { + let next = default_balancer.next_item(None); + println!("{next:?}"); + results.push(next); + } + + let results: Vec<_> = results.into_iter().filter_map(|r| r.map(|f| f.authority.to_string())).collect(); + let expected = [ + "endpoint21:8000", + "endpoint22:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint21:8000", + "endpoint22:8000", + "endpoint21:8000", + "endpoint22:8000", + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_default_loadbalancer_with_wrr_and_unhealthy_three_priority_groups() { + let data = vec![ + ( + 1, + 0, + vec![ + ("endpoint11:8000".parse().unwrap(), 1, HealthStatus::Unhealthy), + ("endpoint12:8000".parse().unwrap(), 1, HealthStatus::Unhealthy), + ], + ), + ( + 1, + 1, + vec![ + ("endpoint21:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint22:8000".parse().unwrap(), 1, HealthStatus::Unhealthy), + ], + ), + ( + 1, + 2, + vec![ + ("endpoint31:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ("endpoint32:8000".parse().unwrap(), 1, HealthStatus::Healthy), + ], + ), + ]; + + let endpoints = get_locality_endpoints(data); + let mut default_balancer: DefaultBalancer, LbEndpoint> = + DefaultBalancer::from_slice(&endpoints); + let mut results = vec![]; + for _ in 0..10 { + let next = default_balancer.next_item(None); + println!("{next:?}"); + results.push(next); + } + + let results: Vec<_> = results.into_iter().filter_map(|r| r.map(|f| f.authority.to_string())).collect(); + let expected = [ + "endpoint21:8000", + "endpoint31:8000", + "endpoint21:8000", + "endpoint21:8000", + "endpoint21:8000", + "endpoint32:8000", + "endpoint21:8000", + "endpoint21:8000", + "endpoint31:8000", + "endpoint21:8000", + ]; + assert_eq!(results, expected); + } +} diff --git a/orion-lib/src/clusters/balancers/hash_policy.rs b/orion-lib/src/clusters/balancers/hash_policy.rs new file mode 100644 index 00000000..755c8742 --- /dev/null +++ b/orion-lib/src/clusters/balancers/hash_policy.rs @@ -0,0 +1,318 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing 
Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::hash::Hasher; +use std::net::SocketAddr; +use std::ops::ControlFlow; + +use http::Request; +use hyper::body::Incoming; +use orion_configuration::config::network_filters::http_connection_manager::route::{HashPolicy, HashPolicyResult}; +use twox_hash::XxHash64; + +use crate::body::timeout_body::TimeoutBody; + +#[derive(Clone, Debug)] +pub struct HashState<'a, B = TimeoutBody> { + policies: &'a [HashPolicy], + req: &'a Request, + src_addr: SocketAddr, +} + +impl<'a, B> HashState<'a, B> { + pub fn new(policies: &'a [HashPolicy], req: &'a Request, src_addr: SocketAddr) -> Self { + Self { policies, req, src_addr } + } + pub fn compute(self) -> Option { + if self.policies.is_empty() { + return None; + } + let mut hasher = DeterministicBuildHasher::build_hasher(); + match self.policies.iter().try_fold(false, |prev, policy| { + match policy.apply(&mut hasher, self.req, self.src_addr) { + HashPolicyResult::Applied => ControlFlow::Continue(true), + HashPolicyResult::Skipped => ControlFlow::Continue(prev), + HashPolicyResult::Terminal => ControlFlow::Break(()), + } + }) { + ControlFlow::Continue(applied) => applied.then_some(hasher.finish()), + ControlFlow::Break(()) => Some(hasher.finish()), + } + } +} + +/// Similar to [std::hash::BuildHasher] but with a deterministic seed. +#[derive(Default)] +pub(crate) struct DeterministicBuildHasher; + +impl DeterministicBuildHasher { + const SEED: u64 = 0; + + pub fn build_hasher() -> XxHash64 { + XxHash64::with_seed(Self::SEED) + } + + // FIXME(oriol): for some reason Clippy 1.78 was failing in CI and couldn't find `Hash`: + // error[E0404]: expected trait, found derive macro `Hash` + // ...despite `Hash` being imported. This could not be reproduced locally, so leaving this + // fully qualified name to pass CI. 
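    // Illustrative usage sketch (not part of this patch; `hash_policies` and
    // `peer_addr` are hypothetical names): a call site evaluates the whole
    // policy chain once per request to derive an optional route hash, e.g.
    //
    //     let hash: Option<u64> =
    //         HashState::new(&route.hash_policies, &request, peer_addr).compute();
    //     let endpoint = balancer.next_item(hash);
    //
    // `compute()` yields `None` when no policy applied (for instance, the
    // header was absent), and a `terminal` policy ends the chain as soon as it
    // applies.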
+ pub fn hash_one_with_seed(x: T, seed: u64) -> u64 { + let mut hasher = XxHash64::with_seed(seed); + x.hash(&mut hasher); + hasher.finish() + } +} + +#[cfg(test)] +mod test { + use super::{DeterministicBuildHasher, HashPolicy, HashState}; + use http::{request::Builder, HeaderName, HeaderValue, Request}; + use orion_configuration::config::network_filters::http_connection_manager::route::PolicySpecifier; + use std::{ + hash::{Hash, Hasher}, + net::SocketAddr, + }; + use twox_hash::XxHash64; + + pub struct TestHasher(XxHash64); + + impl TestHasher { + fn new() -> Self { + Self(DeterministicBuildHasher::build_hasher()) + } + + fn hash(mut self, value: T) -> Self { + value.hash(&mut self.0); + self + } + + fn finish(self) -> u64 { + self.0.finish() + } + } + + fn build_request<'a>(uri: &str, headers: impl IntoIterator) -> Request<()> { + let mut builder = Builder::new().uri(uri); + + builder = headers.into_iter().fold(builder, |builder, (key, value)| builder.header(key, value)); + + builder.body(()).unwrap() + } + + fn hasher_from_policies<'a>(policies: impl IntoIterator) -> Vec { + policies + .into_iter() + .map(|(policy_specifier, terminal)| HashPolicy { policy_specifier: policy_specifier.clone(), terminal }) + .collect() + } + + #[test] + fn hash_policy() { + let source_ip = SocketAddr::from(([192, 168, 0, 1], 8000)); + + let policy_header = PolicySpecifier::Header(HeaderName::from_static("lb-header")); + let policy_query = PolicySpecifier::QueryParameter("lb-param".into()); + let policy_addr = PolicySpecifier::SourceIp(true); + + // Check header hashing + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false)]), + &build_request("https://example.com", [("Lb-Header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).finish() + ); + + // Case insensitive + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false)]), + &build_request("https://example.com", [("lb-header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false)]), + &build_request( + "https://example.com", + [("First-Header", "bar"), ("Lb-Header", "foo"), ("Last-Header", "zarp")] + ), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).finish() + ); + + assert!(HashState::new( + &hasher_from_policies([(&policy_header, false)]), + &build_request("https://example.com", [("Different-Header", "foo")]), + source_ip + ) + .compute() + .is_none()); + + assert!(HashState::new( + &hasher_from_policies([(&policy_header, false)]), + &build_request("https://example.com", None), + source_ip + ) + .compute() + .is_none()); + + // Check query parameter hashing + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_query, false)]), + &build_request("https://example.com/?lb-param=bar", None), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash("bar").finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_query, false)]), + &build_request("https://example.com/?first=foo&lb-param=bar&last=zarp", None), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash("bar").finish() + ); + + // Case sensitive + assert!(HashState::new( + &hasher_from_policies([(&policy_query, false)]), + &build_request("https://example.com/?Lb-Param=bar", None), + source_ip + ) + .compute() + .is_none()); + + 
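        // An entirely different parameter name likewise applies no policy, so
        // no hash is produced.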
assert!(HashState::new( + &hasher_from_policies([(&policy_query, false)]), + &build_request("https://example.com/?different-param=bar", None), + source_ip + ) + .compute() + .is_none()); + + // Check IP address hashing + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_addr, false)]), + &build_request("https://example.com/", None), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(source_ip).finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_addr, false)]), + &build_request("https://example.com/?lb-param=bar", [("Lb-Header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(source_ip).finish() + ); + + // Check chains of policies + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false), (&policy_query, false), (&policy_addr, false)]), + &build_request("https://example.com/?lb-param=bar", [("Lb-Header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).hash("bar").hash(source_ip).finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false), (&policy_query, false), (&policy_addr, false)]), + &build_request("https://example.com/", [("Lb-Header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).hash(source_ip).finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false), (&policy_query, false), (&policy_addr, false)]), + &build_request("https://example.com/?lb-param=bar", None), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash("bar").hash(source_ip).finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false), (&policy_query, false)]), + &build_request("https://example.com/?lb-param=bar", [("Lb-Header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).hash("bar").finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false), (&policy_query, true), (&policy_addr, false)]), + &build_request("https://example.com/?lb-param=bar", [("Lb-Header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).hash("bar").finish() + ); + + assert_eq!( + HashState::new( + &hasher_from_policies([(&policy_header, false), (&policy_query, true), (&policy_addr, false)]), + &build_request("https://example.com/", [("Lb-Header", "foo")]), + source_ip + ) + .compute() + .unwrap(), + TestHasher::new().hash(HeaderValue::from_static("foo")).hash(source_ip).finish() + ); + } +} diff --git a/orion-lib/src/clusters/balancers/healthy.rs b/orion-lib/src/clusters/balancers/healthy.rs new file mode 100644 index 00000000..35204859 --- /dev/null +++ b/orion-lib/src/clusters/balancers/healthy.rs @@ -0,0 +1,201 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::{fmt::Debug, sync::Arc}; + +use super::{Balancer, WeightedEndpoint}; +use crate::{ + clusters::health::{EndpointHealth, HealthStatus, ValueUpdated}, + Result, +}; + +#[derive(Clone, Debug)] +pub struct LbItem { + item: Arc, + health: HealthStatus, +} + +impl LbItem { + pub fn new(health: HealthStatus, item: Arc) -> Self { + Self { item, health } + } +} + +#[derive(Clone, Debug)] +pub struct HealthyBalancer { + items: Vec>, + balancer: B, +} + +impl HealthyBalancer +where + B: Balancer + FromIterator>, + E: WeightedEndpoint, +{ + pub fn new(items: impl IntoIterator>) -> Self + where + B: Default, + { + let mut this = Self { items: items.into_iter().collect(), balancer: B::default() }; + this.reload(); + this + } + + pub fn next_item(&mut self, hash: Option) -> Option> { + self.balancer.next_item(hash) + } + + pub fn update_health(&mut self, id: &E, health: HealthStatus) -> Result + where + E: Debug + PartialEq, + { + if let Some(endpoint) = self.items.iter_mut().find(|f| id == f.item.as_ref()) { + let updated = endpoint.health.update_health(health); + if updated == ValueUpdated::Updated { + self.reload(); + } + Ok(updated) + } else { + Err(format!("Can't find endpoint {id:?}").into()) + } + } + + fn reload(&mut self) { + self.balancer = + self.items.iter().filter_map(|item| item.health.is_healthy().then_some(Arc::clone(&item.item))).collect(); + } +} + +impl HealthyBalancer +where + B: Balancer + FromIterator>, + E: EndpointHealth + WeightedEndpoint, +{ + pub fn extend(&mut self, items: impl Iterator>) { + self.items.extend(items.map(|i| LbItem::new(i.health(), i))); + self.reload(); + } +} + +impl FromIterator> for HealthyBalancer +where + B: Default + Balancer + FromIterator>, + E: EndpointHealth + WeightedEndpoint, +{ + fn from_iter>>(iter: T) -> Self { + Self::new(iter.into_iter().map(|item| LbItem::new(item.health(), item))) + } +} + +impl Balancer for HealthyBalancer +where + B: Balancer + FromIterator>, + E: WeightedEndpoint, +{ + fn next_item(&mut self, hash: Option) -> Option> { + self.next_item(hash) + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use crate::clusters::{ + balancers::{healthy::HealthyBalancer, wrr::WeightedRoundRobinBalancer, WeightedEndpoint}, + health::HealthStatus, + }; + + use super::LbItem; + + #[derive(Debug, Clone, PartialEq)] + struct Blah { + value: u32, + weight: u32, + } + + impl WeightedEndpoint for Blah { + fn weight(&self) -> u32 { + self.weight + } + } + + type TestBalancer = HealthyBalancer, E>; + + /// Asserts that `balancer` generates the items in `expected_items` in the same sequential order, + /// but allowing for rotation and cycling. For example, for `expected_items = [a, b, c]` valid + /// outputs of the balancer are `[a, b, c, a, b]`, `[b, c, a, b, c]` and `[c, a, b, c, a]`. + /// This makes this test resilient to a Round-Robin that randomizes the start of the sequence. 
+ fn compare_rotated(balancer: &mut TestBalancer, expected_items: Vec<&Arc>) { + let selected_items: Vec<_> = (0..5).map(|_| balancer.next_item(None)).collect(); + let selected: Vec<_> = selected_items.iter().map(|item| item.as_ref()).collect(); + + if expected_items.is_empty() { + assert!(selected_items.iter().all(Option::is_none)); + return; + } + + let mut expected = expected_items.into_iter().map(Some).cycle().peekable(); + + while selected.first() != expected.peek() { + expected.next(); + } + + assert!(selected.into_iter().zip(expected).all(|(a, b)| a == b)); + } + + #[test] + pub fn test_healthy_balancer_health_updates() { + let ab0 = Arc::new(Blah { value: 0, weight: 1 }); + let ab1 = Arc::new(Blah { value: 1, weight: 1 }); + let ab2 = Arc::new(Blah { value: 2, weight: 1 }); + + let mut balancer = TestBalancer::new( + [&ab0, &ab1, &ab2].into_iter().cloned().map(|item| LbItem::new(HealthStatus::Healthy, item)), + ); + + // All items are healthy + compare_rotated(&mut balancer, vec![&ab0, &ab1, &ab2]); + + // Make item 0 unhealthy + balancer.update_health(ab0.as_ref(), HealthStatus::Unhealthy).unwrap(); + compare_rotated(&mut balancer, vec![&ab1, &ab2]); + + // Make item 1 unhealthy + balancer.update_health(ab1.as_ref(), HealthStatus::Unhealthy).unwrap(); + compare_rotated(&mut balancer, vec![&ab2]); + + // Make item 2 unhealthy + balancer.update_health(ab2.as_ref(), HealthStatus::Unhealthy).unwrap(); + compare_rotated(&mut balancer, vec![]); + + // Make item 0 healthy + balancer.update_health(ab0.as_ref(), HealthStatus::Healthy).unwrap(); + compare_rotated(&mut balancer, vec![&ab0]); + + // Make item 1 healthy + balancer.update_health(ab1.as_ref(), HealthStatus::Healthy).unwrap(); + compare_rotated(&mut balancer, vec![&ab0, &ab1]); + + // Make item 2 healthy + balancer.update_health(ab2.as_ref(), HealthStatus::Healthy).unwrap(); + compare_rotated(&mut balancer, vec![&ab0, &ab1, &ab2]); + } +} diff --git a/orion-lib/src/clusters/balancers/least.rs b/orion-lib/src/clusters/balancers/least.rs new file mode 100644 index 00000000..01050309 --- /dev/null +++ b/orion-lib/src/clusters/balancers/least.rs @@ -0,0 +1,353 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
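// Illustrative sketch (not part of this patch): the effective weight used by
// the balancer below is weight / (active_requests + 1) ^ active_request_bias.
fn effective_weight(weight: u32, active_requests: u32, bias: f32) -> f64 {
    if bias == 0.0 {
        // A zero bias ignores load entirely, degrading to plain weighted
        // round-robin.
        return f64::from(weight);
    }
    f64::from(weight) / f64::from(active_requests + 1).powf(f64::from(bias))
}
// For example, weight 2 with 3 requests in flight and bias 1.0 gives
// 2 / (3 + 1) = 0.5, so busy endpoints are picked less often.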
+// +// + +use std::{fmt::Debug, sync::Arc}; + +use rand::{rngs::SmallRng, seq::IteratorRandom, Rng, SeedableRng}; + +use super::{default_balancer::EndpointWithLoad, Balancer, WeightedEndpoint}; + +#[derive(Clone, Debug)] +pub struct LbItem { + weight: u32, + current_weight: f64, + item: Arc, +} + +impl LbItem { + fn new(weight: u32, item: Arc) -> Self { + Self { item, weight, current_weight: 0.0 } + } + + fn adjust_current_weight(&mut self, value: f64) { + self.current_weight += value; + } + + fn load(&self) -> u32 { + self.item.http_load() + } + + fn weight(&self, active_request_bias: f32) -> f64 { + if active_request_bias == 0.0 { + return f64::from(self.weight); + } + f64::from(self.weight) / f64::from(self.load() + 1).powf(active_request_bias.into()) + } +} + +#[derive(Clone, Debug)] +pub struct WeightedLeastRequestBalancer { + items: Vec>, + active_request_bias: f32, + p2c_choice_count: u32, + all_weights_equal: bool, + rng: SmallRng, +} + +const DEFAULT_ACTIVE_REQUEST_BIAS: f32 = 1.0; +const DEFAULT_P2C_CHOICE_COUNT: u16 = 2; // u16 so it can safely be converted to u32 and usize + +impl WeightedLeastRequestBalancer { + fn new(items: impl IntoIterator>) -> Self { + Self::new_with_settings(items, DEFAULT_ACTIVE_REQUEST_BIAS, u32::from(DEFAULT_P2C_CHOICE_COUNT)) + } + + fn new_with_settings( + items: impl IntoIterator>, + active_request_bias: f32, + p2c_choice_count: u32, + ) -> Self { + let rng = SmallRng::from_rng(rand::thread_rng()).expect("RNG must be valid"); + Self::new_with_settings_and_rng(items, active_request_bias, p2c_choice_count, rng) + } + + fn new_with_settings_and_rng( + items: impl IntoIterator>, + active_request_bias: f32, + p2c_choice_count: u32, + rng: SmallRng, + ) -> Self { + let items: Vec<_> = items.into_iter().collect(); + let all_weights_equal = all_equal(&items); + WeightedLeastRequestBalancer { items, active_request_bias, p2c_choice_count, all_weights_equal, rng } + } + + fn next_item_wrr(&mut self) -> Option> { + if self.items.len() <= 1 { + self.items.first().map(|item| &item.item).cloned() + } else { + // Increase the current weight of all the endpoints and calculate the total + let total: f64 = self + .items + .iter_mut() + .map(|item| { + let weight = item.weight(self.active_request_bias); + item.adjust_current_weight(weight); + weight + }) + .sum(); + // Find the item with the highest weight + // Note: not using `max_by` here because it returns the last element for equal weights + let best_item = + self.items + .iter_mut() + .reduce(|best, item| if item.current_weight > best.current_weight { item } else { best }); + // Adjust its weight and return it + best_item.map(|item| { + item.adjust_current_weight(-total); + Arc::clone(&item.item) + }) + } + } + + /// Choose one item using the Power Of Two Choice (P2C) algorithm + fn next_item_p2c(&mut self) -> Option> { + if self.items.len() <= 1 { + self.items.first().map(|item| &item.item).cloned() + } else { + // In Rust there is no safe conversion from u32 to usize + let choice_count = usize::try_from(self.p2c_choice_count).unwrap_or(usize::from(DEFAULT_P2C_CHOICE_COUNT)); + + // Randomly choose `choice_count` distinct healthy items + let chosen_items = self.items.iter().choose_multiple(&mut self.rng, choice_count); + let random_offset = self.rng.gen_range(0..chosen_items.len()); + + // Randomly rotate the list of chosen items, because `choose_multiple()` + // chooses random items but the order in which they are produced is not. 
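            // Without this rotation, ties on load would always favor endpoints
            // that appear earlier in `items`, because the fold below keeps the
            // incumbent when loads are equal.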
+ let mut chosen_items = chosen_items.into_iter().cycle().skip(random_offset).take(choice_count); + + // Select the one with least load + let first_item = chosen_items.next()?; + let best_item = chosen_items + .fold((first_item, first_item.load()), |(best_item, best_item_load), item| { + let load = item.load(); + if load < best_item_load { + (item, load) + } else { + (best_item, best_item_load) + } + }) + .0; + Some(Arc::clone(&best_item.item)) + } + } +} + +impl Default for WeightedLeastRequestBalancer { + fn default() -> Self { + Self::from_iter([]) + } +} + +impl FromIterator> for WeightedLeastRequestBalancer { + fn from_iter>>(iter: T) -> Self { + Self::new(iter.into_iter().map(|item| LbItem::new(item.weight(), item))) + } +} + +impl Balancer for WeightedLeastRequestBalancer { + fn next_item(&mut self, _hash: Option) -> Option> { + if self.all_weights_equal { + // If all weights are equal, Least Load balancer falls back to P2C + self.next_item_p2c() + } else { + // If not all weights are equal, Least Load balancer uses WRR based on load + self.next_item_wrr() + } + } +} + +fn all_equal(items: &[LbItem]) -> bool { + let mut iter = items.iter(); + if let Some(first) = iter.next() { + iter.all(|item| item.weight == first.weight) + } else { + true + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use rand::rngs::SmallRng; + use rand::SeedableRng; + + use crate::clusters::balancers::least::DEFAULT_ACTIVE_REQUEST_BIAS; + use crate::clusters::balancers::least::DEFAULT_P2C_CHOICE_COUNT; + use crate::clusters::balancers::Balancer; + use crate::clusters::balancers::EndpointWithLoad; + use crate::clusters::balancers::WeightedEndpoint; + + use super::{LbItem, WeightedLeastRequestBalancer}; + + #[derive(Clone, Debug, PartialEq)] + struct TestEndpoint { + load: Arc, + weight: u32, + } + + impl TestEndpoint { + fn new(value: usize, weight: u32) -> Self { + Self { load: Arc::new(value), weight } + } + + pub fn value(&self) -> usize { + *self.load + } + + pub fn load_reference(&self) -> Arc { + Arc::clone(&self.load) + } + } + + impl WeightedEndpoint for TestEndpoint { + fn weight(&self) -> u32 { + self.weight + } + } + + impl EndpointWithLoad for TestEndpoint { + #[allow(clippy::cast_possible_truncation)] + fn http_load(&self) -> u32 { + Arc::strong_count(&self.load) as u32 + } + } + + fn endpoints_from_weights>(weights: I) -> Vec { + weights.into_iter().enumerate().map(|(index, weight)| TestEndpoint::new(index, weight)).collect() + } + + #[test] + /// Adds 3 items with equal weight, expects the balancer to select them at least once after 20 requests. + /// Then it increases the load of each item and expect it to not be selected. 
+ pub fn test_least_request_balancer_simple() { + let items = endpoints_from_weights(vec![1; 3]); + let lb_items = items.iter().cloned().map(|item| LbItem::new(item.weight(), Arc::new(item))); + + let rng = SmallRng::seed_from_u64(1); + let mut balancer = WeightedLeastRequestBalancer::new_with_settings_and_rng( + lb_items, + DEFAULT_ACTIVE_REQUEST_BIAS, + u32::from(DEFAULT_P2C_CHOICE_COUNT), + rng, + ); + let mut counts = vec![0; items.len()]; + (0..20).filter_map(|_| balancer.next_item(None).map(|item| item.value())).for_each(|item| counts[item] += 1); + assert!(counts.into_iter().all(|count| count > 0)); + + for busy_item in &items { + let _load = busy_item.load_reference(); + let selected: Vec<_> = (0..10).filter_map(|_| balancer.next_item(None)).collect(); + assert!(selected.iter().all(|item| item.as_ref() != busy_item), "Found {busy_item:?} in {selected:?}"); + } + } + + #[test] + /// Add items with equal weight, and force the P2C algorithm to choose among all of them. + /// Then increase the load of the second half of the items and expect them to never + /// be selected. + pub fn test_least_request_balancer_p2c_choice() { + let items: Vec<_> = endpoints_from_weights(vec![1; 10]); + let lb_items = items.iter().map(|item| LbItem::new(item.weight(), Arc::new(item.clone()))); + + let rng = SmallRng::seed_from_u64(1); + let mut balancer = + WeightedLeastRequestBalancer::new_with_settings_and_rng(lb_items, DEFAULT_ACTIVE_REQUEST_BIAS, 10, rng); + + let _load: Vec<_> = items.iter().take(5).map(TestEndpoint::load_reference).collect(); + assert!((0..10).filter_map(|_| balancer.next_item(None).map(|item| item.value())).all(|item| item >= 5)); + } + + #[test] + /// Expect the balancer to act as a simple weighted round-robin. + pub fn test_least_request_balancer_weights() { + let items: Vec<_> = endpoints_from_weights([1, 1, 2]); + let mut counts: Vec = items.iter().map(|_| 0).collect(); + + let mut balancer: WeightedLeastRequestBalancer = items.into_iter().map(Arc::new).collect(); + for _n in 0..20 { + // Get the content and drop the Arc reference to not increase the load factor + if let Some(index) = balancer.next_item(None).map(|item| item.value()) { + counts[index] += 1; + } + } + + assert_eq!(counts, vec![5, 5, 10]); + } + + #[test] + /// Just make sure that the balancer does not panic. + pub fn test_least_request_weight_overflow() { + let items: Vec<_> = endpoints_from_weights([u32::MAX - 1, 1, 2]); + let mut balancer: WeightedLeastRequestBalancer = items.into_iter().map(Arc::new).collect(); + for _n in 0..20 { + balancer.next_item(None); + } + + let items: Vec<_> = endpoints_from_weights(vec![u32::MAX / 4 + 1; 5]); + let mut balancer: WeightedLeastRequestBalancer = items.into_iter().map(Arc::new).collect(); + for _n in 0..20 { + balancer.next_item(None); + } + } + + #[test] + /// Expect items to be selected less frequently when its load is increased. 
+ pub fn test_least_request_balancer_weights_load() { + fn expect_balancing( + balancer: &mut WeightedLeastRequestBalancer, + requests: usize, + expected_counts: &[usize], + ) { + let mut counts = vec![0; expected_counts.len()]; + for _ in 0..requests { + // Map the Arc to its content to not increase the load factor + if let Some(index) = balancer.next_item(None).map(|item| item.value()) { + counts[index] += 1; + } + } + assert_eq!(counts, expected_counts); + } + let items = endpoints_from_weights([1, 1, 2]); + let mut balancer: WeightedLeastRequestBalancer = items.iter().cloned().map(Arc::new).collect(); + + // No load + expect_balancing(&mut balancer, 20, &[5, 5, 10]); + + // Increase load factor of item 0 + { + let _load = [items[0].load_reference(), items[0].load_reference(), items[0].load_reference()]; + expect_balancing(&mut balancer, 20, &[3, 6, 11]); + } + + // Increase load factor of item 1 + { + let _load = [items[1].load_reference(), items[1].load_reference(), items[1].load_reference()]; + expect_balancing(&mut balancer, 20, &[5, 3, 12]); + } + + // Increase load factor of item 2 + { + let _load = [items[2].load_reference(), items[2].load_reference(), items[2].load_reference()]; + expect_balancing(&mut balancer, 20, &[7, 6, 7]); + } + } +} diff --git a/orion-lib/src/clusters/balancers/maglev.rs b/orion-lib/src/clusters/balancers/maglev.rs new file mode 100644 index 00000000..53b25747 --- /dev/null +++ b/orion-lib/src/clusters/balancers/maglev.rs @@ -0,0 +1,469 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::{fmt::Debug, sync::Arc}; + +use http::uri::Authority; +use rand::Rng; + +use super::{ + default_balancer::{EndpointWithAuthority, LbItem}, + hash_policy::DeterministicBuildHasher, + Balancer, WeightedEndpoint, +}; + +/// A consistent balancer based on the this paper: +/// "Maglev: A Fast and Reliable Software Network Load Balancer" by Danielle E. Eisenbud et al. +/// +/// The basic idea is to build a lookup table with a number of entries per each endpoint +/// proportional to its weight. Maglev tries to guarantee that each endpoint at least has one +/// entry in the table, and to distribute the entries evenly so the load balancing is +/// consistent but fair. +#[derive(Debug, Clone)] +pub struct MaglevBalancer { + items: Vec>, + table: Vec, +} + +// The building of the table requires that the table size is a prime number. +// In this case, `(1 << 16) + 1`. See section 5.3 in the original paper. 
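// (A prime size guarantees gcd(skip, TABLE_SIZE) == 1 for every skip in
// 1..TABLE_SIZE, so the permutation (offset + skip * i) % TABLE_SIZE visits
// each table slot exactly once in TABLE_SIZE steps.)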
+const DEFAULT_TABLE_SIZE: usize = 65537; + +impl MaglevBalancer +where + E: EndpointWithAuthority, +{ + pub fn new(items: impl IntoIterator>) -> Self { + Self::with_size::(items) + } + + fn with_size(items: impl IntoIterator>) -> Self { + let () = as IsPrime>::IS_PRIME; + + let (mut items, total_weight) = collect_checked(items); + + // Sort the items so rebuilding the table will yield consistent results. + items.sort_by(|a, b| authority_sorting_key(a.item.authority()).cmp(authority_sorting_key(b.item.authority()))); + + let total_weight = f64::from(total_weight); + let max_normalized_weight = items.iter().map(|item| f64::from(item.weight) / total_weight).reduce(f64::max); + + let table = match (items.len(), max_normalized_weight) { + (_, None) => Vec::new(), // No items + (1, Some(_)) => vec![0], // A single item + (_, Some(max_normalized_weight)) => { + let mut permutations: Vec> = items + .iter() + .map(|item| { + Permutation::new( + item.item.authority(), + f64::from(item.weight) / total_weight, + max_normalized_weight, + ) + }) + .collect(); + + let mut table_builder = TableBuilder::::new(); + + // This implements the pseudocode from section 3.4 of the paper + // If there is an error in the code, it could enter an infinite loop. + // This is why there are debug assertions. + // In production, the loop will not panic and will struggle to produce something + // that works, even though not remotely close to the desired table. + 'table_loop: for _ in 0..TABLE_SIZE { + 'item_loop: for (item_index, permutation) in permutations.iter_mut().enumerate() { + if table_builder.is_full() { + break 'table_loop; + } + + // Skip this entry until it has accumulated enough weight + // The effect of this is that an item with `max_normalized_weight` will allocate + // an entry each iteration, while others less frequently. + if !permutation.has_enough_weight_after_iterating() { + continue; + } + + // This loop is guaranteed to walk the whole table in TABLE_SIZE steps if it is a prime number + for _ in 0..TABLE_SIZE { + if table_builder.try_update(permutation.next(), item_index).is_ok() { + continue 'item_loop; + } + } + + debug_assert!(false, "Maglev lookup table generator could enter an infinite loop"); + } + } + + table_builder.build() + }, + }; + + MaglevBalancer { items, table } + } +} + +/// Get a sorting key for [Authority] that implements [Ord]. In case you wonder, +/// this implementation is how [Authority] implements [PartialOrd]. 
+fn authority_sorting_key(authority: &Authority) -> impl Iterator<Item = u8> + '_ {
+    authority.as_str().as_bytes().iter().map(u8::to_ascii_lowercase)
+}
+
+struct TableBuilder<const TABLE_SIZE: usize> {
+    table: Vec<Option<usize>>,
+    size: usize,
+}
+
+impl<const TABLE_SIZE: usize> TableBuilder<TABLE_SIZE> {
+    fn new() -> Self {
+        TableBuilder { table: vec![None; TABLE_SIZE], size: 0 }
+    }
+
+    fn try_update(&mut self, index: usize, value: usize) -> Result<(), ()> {
+        if let Some(entry) = self.table.get_mut(index) {
+            if entry.is_none() {
+                *entry = Some(value);
+                self.size += 1;
+                Ok(())
+            } else {
+                Err(())
+            }
+        } else {
+            debug_assert!(false, "Unexpected invalid index while constructing Maglev lookup table");
+            Err(())
+        }
+    }
+
+    fn is_full(&self) -> bool {
+        self.size >= TABLE_SIZE
+    }
+
+    fn build(&mut self) -> Vec<usize> {
+        let table = self
+            .table
+            .iter()
+            .map(|entry| {
+                debug_assert!(entry.is_some(), "Incomplete Maglev lookup table");
+                entry.unwrap_or_default()
+            })
+            .collect();
+        table
+    }
+}
+
+struct Permutation<const TABLE_SIZE: usize> {
+    offset: usize,
+    skip: usize,
+    weight: f64,
+    current_weight: f64,
+    target_weight: f64,
+    next: usize,
+}
+
+impl<const TABLE_SIZE: usize> Permutation<TABLE_SIZE> {
+    const OFFSET_SEED: u64 = 0;
+    const SKIP_SEED: u64 = 1;
+
+    fn new(authority: &Authority, weight: f64, target_weight: f64) -> Self {
+        let offset = usize::try_from(DeterministicBuildHasher::hash_one_with_seed(authority, Self::OFFSET_SEED))
+            .unwrap_or(0)
+            % TABLE_SIZE;
+        let skip = (usize::try_from(DeterministicBuildHasher::hash_one_with_seed(authority, Self::SKIP_SEED))
+            .unwrap_or(0)
+            % (TABLE_SIZE - 1))
+            + 1;
+        debug_assert!((0..TABLE_SIZE).contains(&offset), "Offset is expected to be in 0..TABLE_SIZE");
+        debug_assert!((1..TABLE_SIZE).contains(&skip), "Skip is expected to be in 1..TABLE_SIZE");
+        Self { offset, skip, weight, current_weight: target_weight, target_weight, next: 0 }
+    }
+
+    fn has_enough_weight_after_iterating(&mut self) -> bool {
+        self.current_weight += self.weight;
+        let has_reached_target = self.current_weight >= self.target_weight;
+        if has_reached_target {
+            self.current_weight -= self.target_weight;
+        }
+        has_reached_target
+    }
+
+    fn next(&mut self) -> usize {
+        let index = (self.offset + self.skip * self.next) % TABLE_SIZE;
+        self.next += 1;
+        index
+    }
+}
+
+/// Returns a valid subset of the items whose total weight fits in [u32].
+fn collect_checked<E>(items: impl IntoIterator<Item = LbItem<E>>) -> (Vec<LbItem<E>>, u32) {
+    let mut total = 0_u32;
+    let sanitized_subset = items
+        .into_iter()
+        .take_while(|item| {
+            let result = total.checked_add(item.weight);
+            if let Some(new_total) = result {
+                total = new_total;
+            } else {
+                tracing::warn!("Endpoint weight overflow in Maglev load balancer, will only use the endpoints whose weight sum fits in 32 bits");
+            }
+            result.is_some()
+        })
+        .collect();
+    (sanitized_subset, total)
+}
+
+impl<E: EndpointWithAuthority> Default for MaglevBalancer<E> {
+    fn default() -> Self {
+        Self::new([])
+    }
+}
+
+impl<E> Balancer<E> for MaglevBalancer<E> {
+    fn next_item(&mut self, hash: Option<u64>) -> Option<Arc<E>> {
+        if self.items.len() <= 1 {
+            return self.items.first().map(|lb_item| &lb_item.item).cloned();
+        }
+
+        // If no hash is provided, a random one is generated
+        let hash = hash.unwrap_or(rand::thread_rng().gen());
+
+        let table_index = usize::try_from(hash).unwrap_or(usize::MAX) % self.table.len();
+
+        let index = self.table.get(table_index);
+        index.and_then(|index| self.items.get(*index)).map(|lb_item| &lb_item.item).cloned()
+    }
+}
+
+impl<E: WeightedEndpoint + EndpointWithAuthority> FromIterator<Arc<E>> for MaglevBalancer<E> {
+    fn from_iter<T: IntoIterator<Item = Arc<E>>>(iter: T) -> Self {
+        Self::new(iter.into_iter().map(|item| LbItem::new(item.weight(), item)))
+    }
+}
+
+trait IsPrime {
+    const IS_PRIME: ();
+}
+
+struct Prime<const N: usize>;
+
+impl<const N: usize> IsPrime for Prime<N> {
+    const IS_PRIME: () = assert!(is_prime(N), "Maglev lookup table size is not a prime number");
+}
+
+const fn is_prime(n: usize) -> bool {
+    if n <= 1 {
+        return false;
+    } else if n <= 3 {
+        return true;
+    }
+
+    // round up to power of 2, then take the sqrt which is simply halving the power
+    // and the trailing zeros are equal to n in 2^n
+    let next_square = (n as u128).next_power_of_two().trailing_zeros() + 1;
+    let upper_bound = 1 << (next_square / 2);
+    // if you make a sieve of multiples of 2 and multiples of 3 starting from 0 you get a pattern like
+    // x_xxx_x_xxx_x_xxx_
+    // so jumps by 2.
jumps by 4 alternating, which is incr xor 6 + let mut incr = 2; + + if n % 2 == 0 || n % 3 == 0 { + return false; + } + + let mut current = 5; + while current <= upper_bound { + if n % current == 0 { + return false; + } + current += incr; + incr ^= 6; + } + + true +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use http::uri::Authority; + use rand::{rngs::SmallRng, Rng, SeedableRng}; + + use crate::clusters::balancers::{Balancer, EndpointWithAuthority}; + + use super::{LbItem, MaglevBalancer}; + + struct TestEndpoint { + value: u32, + authority: Authority, + } + + impl TestEndpoint { + fn new(value: u32, authority: Authority) -> Self { + Self { value, authority } + } + } + + impl EndpointWithAuthority for TestEndpoint { + fn authority(&self) -> &Authority { + &self.authority + } + } + + fn get_authority(port: u32) -> Authority { + Authority::try_from(format!("example.com:{port}")).unwrap() + } + + fn balancer_from_distribution( + distribution: &[(u32, u32)], + ) -> MaglevBalancer { + let (values, weights): (Vec<_>, Vec<_>) = distribution.iter().copied().unzip(); + + let items: Vec<_> = + values.iter().map(|value| Arc::new(TestEndpoint::new(*value, get_authority(8000 + value)))).collect(); + + let lb_items = items.iter().cloned().zip(weights.iter()).map(|(item, weight)| LbItem::new(*weight, item)); + + MaglevBalancer::with_size::(lb_items) + } + + fn distribution_within_margin(a: &[f64], b: &[f64], error_margin: f64) { + for (value_a, value_b) in a.iter().zip(b.iter()) { + assert!( + (value_a - value_b).abs() / value_b < error_margin, + "Value {} is {:+.1}% of {} which is not within {:.1}%, when comparing [{}] to [{}]", + value_a, + (value_a - value_b) * 100. / value_b, + value_b, + error_margin * 100.0, + a.iter().map(|count| format!("{:.1}%", count * 100.)).collect::>().join(", "), + b.iter().map(|count| format!("{:.1}%", count * 100.)).collect::>().join(", "), + ); + } + } + + #[test] + fn maglev_balancer_empty() { + let mut empty_balancer: MaglevBalancer = MaglevBalancer::default(); + + assert!(empty_balancer.next_item(None).is_none(), "unexpected result in empty balancer"); + } + + #[test] + fn maglev_balancer_weight_overflow() { + const TABLE_SIZE: usize = 257; + + // Weights of 0 + 1 fit, weight of 2 overflows + let distribution: Vec<_> = vec![(0, u32::MAX - 2), (1, 1), (2, 2)]; + + let balancer = balancer_from_distribution::(&distribution); + + assert_eq!(balancer.items.len(), 2, "balancer did not detect a weight overflow"); + } + + #[test] + fn maglev_balancer_distribution() { + const TABLE_SIZE: usize = 257; + + let distributions = [ + (vec![(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1)], 0.05), + (vec![(0, 1), (1, 2), (2, 3), (3, 1), (4, 2), (5, 3), (6, 1), (7, 2), (8, 3), (9, 1)], 0.05), + (vec![(0, 1), (1, 300)], 0.2), + (vec![(0, 300), (1, 1)], 0.2), + (vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10)], 0.1), + ]; + + for (distribution, error_margin) in distributions { + let weights: Vec<_> = distribution.iter().map(|(_value, weight)| *weight).collect(); + let total_weight: u32 = weights.iter().sum(); + let normalized_weights: Vec<_> = + weights.iter().map(|weight| f64::from(*weight) / f64::from(total_weight)).collect(); + + let balancer = balancer_from_distribution::(&distribution); + + let mut counts = vec![0_u32; distribution.len()]; + + balancer.table.iter().for_each(|index| counts[balancer.items[*index].item.value as usize] += 1); + let ring_size: u32 = counts.iter().sum(); + + 
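        // Every slot of the lookup table maps to exactly one endpoint, so the
        // per-endpoint counts must sum to the full table size.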
assert_eq!(ring_size as usize, TABLE_SIZE, "wrong table size"); + assert!( + counts.iter().all(|count| *count > 0), + "item is zero in {counts:?} for distribution {distribution:?}" + ); + + // Check that the ring has a distribution that fits the expected weights within margin of error + distribution_within_margin( + &counts.iter().map(|count| f64::from(*count) / f64::from(ring_size)).collect::>(), + &normalized_weights, + error_margin, + ); + } + } + + #[test] + fn maglev_balancer_overflow() { + const TABLE_SIZE: usize = 47; + + // When trying to insert 100 endpoints into a 10-item ring, expect that only 10 endpoints are allocated + let distribution: Vec<_> = (0..100).map(|value| (value, 1)).collect(); + + let balancer = balancer_from_distribution::(&distribution); + + let mut counts = vec![0_u32; distribution.len()]; + + balancer.table.iter().for_each(|index| counts[balancer.items[*index].item.value as usize] += 1); + let ring_size: u32 = counts.iter().sum(); + + assert_eq!(ring_size as usize, TABLE_SIZE, "wrong table size"); + + assert_eq!( + counts.iter().filter(|count| **count > 0).count(), + TABLE_SIZE, + "item is zero in {counts:?} for distribution {distribution:?}" + ); + } + + #[test] + fn maglev_balancer_consistent() { + const TABLE_SIZE: usize = 47; + + // 10 endpoints with different weights and addresses `example.com:800X`, where X is 0..10 + let distributions = [ + [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1)], + [(0, 1), (1, 2), (2, 3), (3, 1), (4, 2), (5, 3), (6, 1), (7, 2), (8, 3), (9, 1)], + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10)], + ]; + + for distribution in distributions { + let mut balancer = balancer_from_distribution::(&distribution); + + // Test 100 requests with a random hash each one + let mut rng = SmallRng::seed_from_u64(1); + for _ in 0..100 { + let hash = rng.gen(); + + // Check that load balancing is consistent for this request + (0..10).map(|_| balancer.next_item(Some(hash)).unwrap().value).reduce(|initial, current| { + assert_eq!(initial, current, "Maglev result is not consistent"); + initial + }); + } + } + } +} diff --git a/orion-lib/src/clusters/balancers/mod.rs b/orion-lib/src/clusters/balancers/mod.rs new file mode 100644 index 00000000..ec0ebae4 --- /dev/null +++ b/orion-lib/src/clusters/balancers/mod.rs @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
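// Illustrative sketch (not part of this patch): a minimal implementation of
// the `Balancer` trait below, assuming the trait is generic over the endpoint
// type `E` and receives the optional route hash computed by the hash policies.
struct FirstEndpointBalancer<E> {
    items: Vec<Arc<E>>,
}

impl<E> Balancer<E> for FirstEndpointBalancer<E> {
    fn next_item(&mut self, _hash: Option<u64>) -> Option<Arc<E>> {
        // This one ignores the hash; consistent balancers (ring hash, Maglev)
        // use it to pin a request to a stable endpoint.
        self.items.first().cloned()
    }
}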
+//
+//
+
+use std::sync::Arc;
+
+mod default_balancer;
+pub(crate) mod hash_policy;
+pub(crate) mod healthy;
+pub(crate) mod least;
+pub(crate) mod maglev;
+pub(crate) mod priority;
+pub(crate) mod random;
+pub(crate) mod ring;
+pub(crate) mod wrr;
+
+pub use default_balancer::{DefaultBalancer, EndpointWithAuthority, EndpointWithLoad, WeightedEndpoint};
+
+pub trait Balancer<E> {
+    fn next_item(&mut self, hash: Option<u64>) -> Option<Arc<E>>;
+}
diff --git a/orion-lib/src/clusters/balancers/priority.rs b/orion-lib/src/clusters/balancers/priority.rs
new file mode 100644
index 00000000..0d9280f5
--- /dev/null
+++ b/orion-lib/src/clusters/balancers/priority.rs
@@ -0,0 +1,140 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use rustc_hash::FxHashMap as HashMap;
+
+#[derive(Debug, Clone)]
+pub(crate) struct PriorityInfo<B> {
+    pub balancer: B,
+    pub healthy: u32,
+    pub total: u32,
+}
+
+pub struct Priority;
+
+impl Priority {
+    fn calculate_priority_health(healthy: u32, total: u32) -> f64 {
+        let x = f64::from(100 * healthy / total);
+        f64::min(100.0, 1.4 * x)
+    }
+
+    ///
+    /// Implementation and test cases taken from Envoy's documentation
+    /// https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/priority
+    ///
+    /// health(P_X) = min(100, 1.4 * 100 * healthy_P_X_backends / total_P_X_backends)
+    /// normalized_total_health = min(100, Σ(health(P_0)...health(P_X)))
+    /// priority_load(P_0) = health(P_0) * 100 / normalized_total_health
+    /// priority_load(P_X) = min(100 - Σ(priority_load(P_0)..priority_load(P_X-1)),
+    ///                          health(P_X) * 100 / normalized_total_health)
+    ///
+    ///
+    #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::similar_names)]
+    pub fn calculate_priority_loads<B>(endpoints: &HashMap<u32, PriorityInfo<B>>) -> Vec<(u32, u32)> {
+        let mut priority_health = vec![];
+        let mut sorted_endpoints = endpoints.iter().collect::<Vec<_>>();
+        sorted_endpoints.sort_by(|a, b| a.0.cmp(b.0));
+        for (k, v) in &sorted_endpoints {
+            priority_health.push((*k, Self::calculate_priority_health(v.healthy, v.total)));
+        }
+
+        let normalized_total_health = f64::min(100.0, priority_health.iter().map(|(_, p)| p).sum());
+        let (p0_k, p) = priority_health.remove(0);
+        let p_0_load = p * 100.0 / normalized_total_health;
+        let mut p_load = vec![(p0_k, p_0_load)];
+
+        for (k, x) in priority_health {
+            let load = 100.0 - p_load.iter().map(|(_, p)| p).sum::<f64>();
+            let normalized_p_x_health = x * 100.0 / normalized_total_health;
+            let p_x_load = f64::min(load, normalized_p_x_health);
+            p_load.push((k, p_x_load));
+        }
+        p_load.into_iter().map(|(k, f)| (*k, f64::round(f) as u32)).collect()
+    }
+}
+
+#[cfg(test)]
+mod test {
+
+    use orion_data_plane_api::envoy_data_plane_api::envoy::config::endpoint::v3::LbEndpoint;
+    use rustc_hash::FxHashMap as HashMap;
+
+    use super::PriorityInfo;
+    use
crate::clusters::balancers::{priority::Priority, random::RandomBalancer}; + + fn generate_endpoints( + (p1, h1, e1): (u32, u32, u32), + (p2, h2, e2): (u32, u32, u32), + (p3, h3, e3): (u32, u32, u32), + ) -> HashMap>> { + let pi1 = PriorityInfo { balancer: RandomBalancer::new(vec![]), healthy: h1, total: e1 }; + + let pi2 = PriorityInfo { balancer: RandomBalancer::new(vec![]), healthy: h2, total: e2 }; + + let pi3 = PriorityInfo { balancer: RandomBalancer::new(vec![]), healthy: h3, total: e3 }; + let mut map = HashMap::default(); + map.insert(p1, pi1); + map.insert(p2, pi2); + map.insert(p3, pi3); + map + } + + #[test] + pub fn calculate_priority_loads_test() { + let m = generate_endpoints((0, 100, 100), (1, 100, 100), (2, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(0, 100), (1, 0), (2, 0)]); + + let m = generate_endpoints((0, 72, 100), (1, 72, 100), (2, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(0, 100), (1, 0), (2, 0)]); + + let m = generate_endpoints((0, 71, 100), (1, 71, 100), (2, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(0, 99), (1, 1), (2, 0)]); + + let m = generate_endpoints((0, 50, 100), (1, 50, 100), (2, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(0, 70), (1, 30), (2, 0)]); + + let m = generate_endpoints((0, 25, 100), (1, 100, 100), (2, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(0, 35), (1, 65), (2, 0)]); + + let m = generate_endpoints((0, 25, 100), (1, 25, 100), (2, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(0, 35), (1, 35), (2, 30)]); + + let m = generate_endpoints((0, 25, 100), (1, 25, 100), (2, 20, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(0, 36), (1, 36), (2, 29)]); + } + + #[test] + pub fn calculate_priority_loads_test_non_contiguous_priorities() { + let m = generate_endpoints((1, 50, 100), (3, 50, 100), (5, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(1, 70), (3, 30), (5, 0)]); + + let m = generate_endpoints((1, 25, 100), (3, 25, 100), (5, 100, 100)); + let p: Vec<(_, _)> = Priority::calculate_priority_loads(&m).into_iter().collect(); + assert_eq!(p, [(1, 35), (3, 35), (5, 30)]); + } +} diff --git a/orion-lib/src/clusters/balancers/random.rs b/orion-lib/src/clusters/balancers/random.rs new file mode 100644 index 00000000..15783f43 --- /dev/null +++ b/orion-lib/src/clusters/balancers/random.rs @@ -0,0 +1,195 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::{fmt::Debug, sync::Arc}; + +use rand::{ + distributions::{Distribution, WeightedIndex}, + rngs::SmallRng, + SeedableRng, +}; + +use super::{default_balancer::LbItem, Balancer, WeightedEndpoint}; + +#[derive(Debug, Clone)] +pub struct RandomBalancer { + items: Vec>, + weighted_index: Option>, + rng: SmallRng, +} + +impl RandomBalancer { + pub fn new(items: impl IntoIterator>) -> Self { + let rng = SmallRng::from_rng(rand::thread_rng()).expect("RNG must be valid"); + RandomBalancer::new_with_rng(items, rng) + } + + fn new_with_rng(items: impl IntoIterator>, rng: SmallRng) -> Self { + let mut balancer = RandomBalancer { items: collect_checked(items), weighted_index: None, rng }; + balancer.weighted_index = WeightedIndex::new(balancer.items.iter().map(|item| item.weight)).ok(); + balancer + } +} + +/// Returns a valid subset of the items whose total weight fits in [u32]. +fn collect_checked(items: impl IntoIterator>) -> Vec> { + let mut total = 0_u32; + items + .into_iter() + .take_while(|item| { + let result = total.checked_add(item.weight); + if let Some(new_total) = result { + total = new_total; + } else { + tracing::warn!("Endpoint weight overflow in random load balancer, will only use the endpoints whose weight sum fits in 32 bits"); + } + result.is_some() + }) + .collect() +} + +impl Default for RandomBalancer { + fn default() -> Self { + Self::from_iter([]) + } +} + +impl Balancer for RandomBalancer { + fn next_item(&mut self, _hash: Option) -> Option> { + self.items.get(self.weighted_index.as_ref()?.sample(&mut self.rng)).map(|item| &item.item).cloned() + } +} + +impl FromIterator> for RandomBalancer { + fn from_iter>>(iter: T) -> Self { + RandomBalancer::new(iter.into_iter().map(|item| LbItem::new(item.weight(), item))) + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use rand::{rngs::SmallRng, SeedableRng}; + + use crate::clusters::balancers::{ + random::{LbItem, RandomBalancer}, + Balancer, + }; + + #[test] + pub fn test_random_balancer_1() { + #[derive(Debug, Clone, PartialEq)] + struct Blah { + value: u32, + } + let b1 = Blah { value: 0 }; + let b2 = Blah { value: 1 }; + let b3 = Blah { value: 2 }; + + let ab1 = Arc::new(b1); + let ab2 = Arc::new(b2); + let ab3 = Arc::new(b3); + + let expected_b1 = Arc::clone(&ab1); + let expected_b2 = Arc::clone(&ab2); + let expected_b3 = Arc::clone(&ab3); + + let items = [LbItem::new(1, ab1), LbItem::new(1, ab2), LbItem::new(1, ab3)]; + + let gen = SmallRng::seed_from_u64(1); + let mut random_lb = RandomBalancer::new_with_rng(items, gen); + let mut selected_items = vec![]; + for _n in 0..10 { + selected_items.push(random_lb.next_item(None)); + } + let selected_items: Vec<_> = selected_items.into_iter().flatten().collect(); + println!("{selected_items:?}"); + + assert_eq!( + selected_items, + vec![ + Arc::clone(&expected_b3), + Arc::clone(&expected_b1), + Arc::clone(&expected_b1), + Arc::clone(&expected_b2), + Arc::clone(&expected_b2), + Arc::clone(&expected_b3), + Arc::clone(&expected_b1), + Arc::clone(&expected_b2), + Arc::clone(&expected_b1), + Arc::clone(&expected_b3) + ] + ); + } + + #[test] + pub fn test_random_balancer_2() { + let items = [LbItem::new(1, Arc::new(0)), LbItem::new(1, Arc::new(1)), LbItem::new(2, Arc::new(2))]; + let mut counts = vec![0_u32; items.len()]; + + let gen = SmallRng::seed_from_u64(1); + let mut random_lb = RandomBalancer::new_with_rng(items, gen); + + for _n in 0..20 { + 
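+            // With a fixed RNG seed the draws are deterministic; weights 1:1:2 should give
+            // the last endpoint about half of the 20 picks (exact tallies asserted below).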
counts[random_lb.next_item(None).map(|item| *item).unwrap()] += 1; + } + + assert_eq!(counts, vec![5, 6, 9]); + } + + #[test] + pub fn test_random_balancer_3() { + let items = [LbItem::new(1, Arc::new(0)), LbItem::new(2, Arc::new(1)), LbItem::new(4, Arc::new(2))]; + let mut counts = vec![0_u32; items.len()]; + + let gen = SmallRng::seed_from_u64(1); + let mut random_lb = RandomBalancer::new_with_rng(items, gen); + let mut selected_items = vec![]; + for _n in 0..20 { + selected_items.push(random_lb.next_item(None)); + } + let selected_items: Vec<_> = selected_items.into_iter().flatten().collect(); + + for i in selected_items { + counts[*i] += 1; + } + + assert_eq!(counts, vec![2, 6, 12]); + } + + #[test] + fn random_balancer_weight_overflow() { + let items = [ + LbItem::new(u32::MAX / 2, Arc::new(0)), + LbItem::new(u32::MAX / 2, Arc::new(1)), + LbItem::new(1, Arc::new(2)), + ]; + let mut counts = vec![0_u32; items.len()]; + let gen = SmallRng::seed_from_u64(1); + let mut random_lb = RandomBalancer::new_with_rng(items, gen); + for _ in 0..20 { + counts[*random_lb.next_item(None).unwrap()] += 1; + } + assert!(counts[0] > 0, "expected first item to be selected"); + assert!(counts[1] > 0, "expected second item to be selected"); + assert_eq!(counts[2], 0, "expected third item to not be selected"); + } +} diff --git a/orion-lib/src/clusters/balancers/ring.rs b/orion-lib/src/clusters/balancers/ring.rs new file mode 100644 index 00000000..a7942376 --- /dev/null +++ b/orion-lib/src/clusters/balancers/ring.rs @@ -0,0 +1,359 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::{ + fmt::Debug, + hash::{Hash, Hasher}, + sync::Arc, +}; + +use rand::Rng; + +use super::{ + default_balancer::{EndpointWithAuthority, LbItem}, + hash_policy::DeterministicBuildHasher, + Balancer, WeightedEndpoint, +}; + +/// A consistent balancer based on the "ketama hash" algorithm. +/// +/// A ring contains slots proportional to the weight of each endpoint. When doing load balancing, +/// one slot is selected based on the closest hash of the request. +/// +/// When there are not enough slots in the ring, some endpoints don't get one. This can happen +/// because there are more endpoints than slots, or because one endpoint has an exaggerated weight +/// over the others. Slots are assigned on a FIFO basis, so for a ring size of 20, +/// `[(endpoint0, 50), (endpoint1, 1)]` should only contain one endpoint, while +/// `[(endpoint1, 1), (endpoint0, 50)]` should contain both. 
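+///
+/// A minimal usage sketch (illustrative; assumes `lb_items` is an iterator of
+/// `LbItem`s and `request_hash` is a `u64` produced by a hash policy):
+///
+/// ```ignore
+/// let mut balancer = RingHashBalancer::new(lb_items);
+/// // The same request hash keeps resolving to the same endpoint
+/// // for as long as the ring is not rebuilt.
+/// let first = balancer.next_item(Some(request_hash));
+/// let second = balancer.next_item(Some(request_hash));
+/// assert_eq!(first.map(|e| e.authority().clone()), second.map(|e| e.authority().clone()));
+/// ```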
+#[derive(Debug, Clone)] +pub struct RingHashBalancer { + items: Vec>, + ring: Vec<(u64, usize)>, +} + +const DEFAULT_MIN_RING_SIZE: u32 = 1024; +const DEFAULT_MAX_RING_SIZE: u32 = 1024 * 1024 * 8; + +impl RingHashBalancer +where + E: EndpointWithAuthority, +{ + pub fn new(items: impl IntoIterator>) -> Self { + Self::new_with_settings(items, DEFAULT_MIN_RING_SIZE, DEFAULT_MAX_RING_SIZE) + } + + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + fn new_with_settings(items: impl IntoIterator>, min_ring_size: u32, max_ring_size: u32) -> Self { + let (items, total_weight) = collect_checked(items); + + // Calculate the size of the ring so all endpoints have at least one slot + let total_weight = f64::from(total_weight); + let normalized_weights: Vec = items.iter().map(|item| f64::from(item.weight) / total_weight).collect(); + + let mut ring = Vec::new(); + if let Some(minimum_weight) = normalized_weights.iter().copied().reduce(f64::min) { + let scale = + ((minimum_weight * f64::from(min_ring_size)).ceil() / minimum_weight).min(f64::from(max_ring_size)); + + let ring_size = scale.ceil() as usize; // there is no usize::try_from::() yet + ring.reserve(ring_size); + + // Add entries to the ring until it reaches the target number according to + // the weight and the scale factor for this item. + let mut current_slots = 0.0; + let mut target_slots = 0.0; + for (index, (item, weight)) in items.iter().zip(normalized_weights).enumerate() { + let item_key = item.item.authority(); + + let item_slots = weight * scale; + target_slots += item_slots; + + let mut slot_index = 0; + while current_slots < target_slots { + let mut hasher = DeterministicBuildHasher::build_hasher(); + item_key.hash(&mut hasher); + slot_index.hash(&mut hasher); + let hash = hasher.finish(); + + ring.push((hash, index)); + current_slots += 1.0; + slot_index += 1; + } + } + + // The ring has to be sorted for the binary search in `next_item()` to work + ring.sort_by_key(|(hash, _)| *hash); + } + + RingHashBalancer { items, ring } + } +} + +/// Returns a valid subset of the items whose total weight fits in [u32]. 
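+///
+/// For example (illustrative): weights `[u32::MAX - 1, 1, 1]` keep the first two
+/// endpoints and drop the third, since adding the last weight would overflow the
+/// running `u32` total.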
+fn collect_checked(items: impl IntoIterator>) -> (Vec>, u32) { + let mut total = 0_u32; + let sanitized_subset = items + .into_iter() + .take_while(|item| { + let result = total.checked_add(item.weight); + if let Some(new_total) = result { + total = new_total; + } else { + tracing::warn!("Endpoint weight overflow in ring hash load balancer, will only use the endpoints whose weight sum fits in 32 bits"); + } + result.is_some() + }) + .collect(); + (sanitized_subset, total) +} + +impl Default for RingHashBalancer { + fn default() -> Self { + Self::new([]) + } +} + +impl Balancer for RingHashBalancer { + fn next_item(&mut self, hash: Option) -> Option> { + if self.items.len() <= 1 { + return self.items.first().map(|lb_item| &lb_item.item).cloned(); + } + + // If no hash is provided, a random one is generated + let hash = hash.unwrap_or(rand::thread_rng().gen()); + + // Find the closest entry doing a binary search of the hash + let ring_index = match self.ring.binary_search_by_key(&hash, |(hash, _)| *hash) { + Ok(matching_index) => matching_index, + Err(closest_index) => closest_index.min(self.ring.len() - 1), + }; + + let (_, index) = self.ring.get(ring_index)?; + + self.items.get(*index).map(|lb_item| &lb_item.item).cloned() + } +} + +impl FromIterator> for RingHashBalancer { + fn from_iter>>(iter: T) -> Self { + Self::new(iter.into_iter().map(|item| LbItem::new(item.weight(), item))) + } +} + +#[cfg(test)] +mod test { + use std::{ops::ControlFlow, sync::Arc}; + + use http::uri::Authority; + use rand::{rngs::SmallRng, Rng, SeedableRng}; + + use crate::clusters::balancers::{Balancer, EndpointWithAuthority}; + + use super::{LbItem, RingHashBalancer}; + + struct TestEndpoint { + value: u32, + authority: Authority, + } + + impl TestEndpoint { + fn new(value: u32, authority: Authority) -> Self { + Self { value, authority } + } + } + + impl EndpointWithAuthority for TestEndpoint { + fn authority(&self) -> &Authority { + &self.authority + } + } + + fn is_sorted(mut iter: impl Iterator) -> bool { + if let Some(first) = iter.next() { + iter.try_fold( + first, + |prev, current| if prev <= current { ControlFlow::Continue(current) } else { ControlFlow::Break(()) }, + ) + .is_continue() + } else { + true + } + } + + fn get_authority(port: u32) -> Authority { + Authority::try_from(format!("example.com:{port}")).unwrap() + } + + fn balancer_from_distribution( + distribution: &[(u32, u32)], + ring_min: u32, + ring_max: u32, + ) -> RingHashBalancer { + let (values, weights): (Vec<_>, Vec<_>) = distribution.iter().copied().unzip(); + + let items: Vec<_> = + values.iter().map(|value| Arc::new(TestEndpoint::new(*value, get_authority(8000 + value)))).collect(); + + let lb_items = items.iter().cloned().zip(weights.iter()).map(|(item, weight)| LbItem::new(*weight, item)); + + let balancer = RingHashBalancer::new_with_settings(lb_items, ring_min, ring_max); + + assert!(is_sorted(balancer.ring.iter().map(|(hash, _weight)| hash)), "ring is not sorted"); + + balancer + } + + fn distribution_within_margin(a: &[f64], b: &[f64], error_margin: f64) { + for (value_a, value_b) in a.iter().zip(b.iter()) { + assert!( + (value_a - value_b).abs() / value_b < error_margin, + "Value {} is {:+.1}% of {} which is not within {:.1}%, when comparing [{}] to [{}]", + value_a, + (value_a - value_b) * 100. 
/ value_b, + value_b, + error_margin * 100.0, + a.iter().map(|count| format!("{:.1}%", count * 100.)).collect::>().join(", "), + b.iter().map(|count| format!("{:.1}%", count * 100.)).collect::>().join(", "), + ); + } + } + + #[test] + fn ring_balancer_empty() { + let mut empty_balancer: RingHashBalancer = RingHashBalancer::default(); + + assert!(empty_balancer.next_item(None).is_none(), "unexpected output from empty load balancer"); + } + + #[test] + fn ring_balancer_weight_overflow() { + const RING_MIN: u32 = 1; + const RING_MAX: u32 = 10; + + // Weights of 0 + 1 fit, weight of 2 overflows + let distribution: Vec<_> = vec![(0, u32::MAX - 2), (1, 1), (2, 2)]; + + let balancer = balancer_from_distribution(&distribution, RING_MIN, RING_MAX); + + assert_eq!(balancer.items.len(), 2, "balancer did not detect a weight overflow"); + } + + #[test] + fn ring_balancer_distribution() { + const RING_MIN: u32 = 10; + const RING_MAX: u32 = 50; + + let distributions = [ + // Should only need 10 slots + (vec![(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1)], 0.05), + // Should only need 16 slots + (vec![(0, 1), (1, 2), (2, 3), (3, 1), (4, 2), (5, 3), (6, 1), (7, 2), (8, 3), (9, 1)], 0.05), + // Should need 51 slots, which should be capped at `ring_max_size` + // Beware: [(0, 50), (1, 1)] would fail, because slots are assigned on a FIFO basis, + // so the 50 weight would consume all the slots, even after normalizing the weights. + (vec![(0, 1), (1, 50)], 0.05), + // Should need 55 slots, which should be capped at `ring_max_size` + (vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10)], 0.15), + ]; + + for (distribution, error_margin) in distributions { + let weights: Vec<_> = distribution.iter().map(|(_value, weight)| *weight).collect(); + let total_weight: u32 = weights.iter().sum(); + let normalized_weights: Vec<_> = + weights.iter().map(|weight| f64::from(*weight) / f64::from(total_weight)).collect(); + + let balancer = balancer_from_distribution(&distribution, RING_MIN, RING_MAX); + + let mut counts = vec![0_u32; distribution.len()]; + + balancer.ring.iter().for_each(|(_hash, index)| counts[balancer.items[*index].item.value as usize] += 1); + let ring_size: u32 = counts.iter().sum(); + + assert!((RING_MIN..=RING_MAX).contains(&ring_size)); + assert!( + counts.iter().all(|count| *count > 0), + "item is zero in {counts:?} for distribution {distribution:?}" + ); + + // Check that the ring has a distribution that fits the expected weights within 5% of error + distribution_within_margin( + &counts.iter().map(|count| f64::from(*count) / f64::from(ring_size)).collect::>(), + &normalized_weights, + error_margin, + ); + } + } + + #[test] + fn ring_balancer_overflow() { + const RING_MIN: u32 = 1; + const RING_MAX: u16 = 10; + + // When trying to insert 20 endpoints into a 10-item ring, expect that only 10 endpoints are allocated + let distribution: Vec<_> = (0..20).map(|value| (value, 1)).collect(); + + let balancer = balancer_from_distribution(&distribution, RING_MIN, u32::from(RING_MAX)); + + let mut counts = vec![0_u32; distribution.len()]; + + balancer.ring.iter().for_each(|(_hash, index)| counts[balancer.items[*index].item.value as usize] += 1); + let ring_size: u32 = counts.iter().sum(); + + assert!( + (RING_MIN..=u32::from(RING_MAX)).contains(&ring_size), + "ring size {ring_size} outside the {RING_MIN}..={RING_MAX} bounds" + ); + + assert_eq!( + counts.iter().filter(|count| **count > 0).count(), + usize::from(RING_MAX), + "item is zero in 
{counts:?} for distribution {distribution:?}" + ); + } + + #[test] + fn ring_balancer_consistent() { + const RING_MIN: u32 = 10; + const RING_MAX: u32 = 20; + + // 10 endpoints with different weights and addresses `example.com:800X`, where X is 0..10 + let distributions = [ + [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1)], + [(0, 1), (1, 2), (2, 3), (3, 1), (4, 2), (5, 3), (6, 1), (7, 2), (8, 3), (9, 1)], + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10)], + ]; + + for distribution in distributions { + let mut balancer = balancer_from_distribution(&distribution, RING_MIN, RING_MAX); + + // Test 100 requests with a random hash each one + let mut rng = SmallRng::seed_from_u64(1); + for _ in 0..100 { + let hash = rng.gen(); + + // Check that load balancing is consistent for this request + (0..10).map(|_| balancer.next_item(Some(hash)).unwrap().value).reduce(|initial, current| { + assert_eq!(initial, current, "different endpoint for the same request"); + initial + }); + } + } + } +} diff --git a/orion-lib/src/clusters/balancers/wrr.rs b/orion-lib/src/clusters/balancers/wrr.rs new file mode 100644 index 00000000..fb2e602a --- /dev/null +++ b/orion-lib/src/clusters/balancers/wrr.rs @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::{fmt::Debug, sync::Arc}; + +use super::{Balancer, WeightedEndpoint}; + +/// The round robin load balancer select 1 available host in round robin order using +/// the smooth weighted round-robin balancing algorithm. +/// +/// See implementation in nginx +/// . + +#[derive(Clone, Debug)] +pub struct LbItem { + weight: u32, + current_weight: i32, + item: Arc, +} + +impl LbItem { + pub fn new(weight: u32, item: Arc) -> Self { + Self { item, weight, current_weight: 0 } + } + + fn increase_curent_weight(&mut self) { + self.adjust_current_weight(i32::try_from(self.weight).unwrap_or(i32::MAX)); + } + + fn adjust_current_weight(&mut self, value: i32) { + self.current_weight = self.current_weight.saturating_add(value); + } +} + +#[derive(Debug, Clone)] +pub struct WeightedRoundRobinBalancer { + items: Vec>, +} + +impl WeightedRoundRobinBalancer { + pub fn new(items: impl IntoIterator>) -> Self { + WeightedRoundRobinBalancer { items: collect_checked(items) } + } +} + +/// Returns a valid subset of the items whose total weight fits in [u32]. 
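+///
+/// Truncation is FIFO: once the running total would overflow, the offending
+/// endpoint and every endpoint after it are dropped, as exercised by
+/// `test_wrr_balancer_weight_overflow` below.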
+fn collect_checked(items: impl IntoIterator>) -> Vec> { + let mut total = 0_u32; + items + .into_iter() + .take_while(|item| { + let result = total.checked_add(item.weight); + if let Some(new_total) = result { + total = new_total; + } else { + tracing::warn!("Endpoint weight overflow in round robin load balancer, will only use the endpoints whose weight sum fits in 32 bits"); + } + result.is_some() + }) + .collect() +} + +impl Default for WeightedRoundRobinBalancer { + fn default() -> Self { + Self::from_iter([]) + } +} + +impl Balancer for WeightedRoundRobinBalancer { + fn next_item(&mut self, _hash: Option) -> Option> { + if self.items.len() <= 1 { + self.items.first().map(|item| &item.item).cloned() + } else { + // Increase the current weight of all the endpoints + self.items.iter_mut().for_each(LbItem::increase_curent_weight); + // Calculate the total weight + let total: i32 = self + .items + .iter() + .map(|item| i32::try_from(item.weight).unwrap_or(i32::MAX)) + .fold(0, i32::saturating_add); + // Find the item with the highest weight + // Note: not using `max_by` here because it returns the last element for equal weights + let best_item = + self.items + .iter_mut() + .reduce(|best, item| if item.current_weight > best.current_weight { item } else { best }); + // Adjust its weight and return it + best_item.map(|item| { + item.adjust_current_weight(-total); + Arc::clone(&item.item) + }) + } + } +} + +impl FromIterator> for WeightedRoundRobinBalancer { + fn from_iter>>(iter: T) -> Self { + Self::new(iter.into_iter().map(|item| LbItem::new(item.weight(), item))) + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use crate::clusters::balancers::Balancer; + + use super::{LbItem, WeightedRoundRobinBalancer}; + + fn compare_rotated(a: impl IntoIterator, b: impl IntoIterator) { + let mut a = a.into_iter().peekable(); + let mut b = b.into_iter().peekable(); + while a.peek() != b.peek() { + a.next(); + } + + assert!(a.zip(b).all(|(a, b)| a == b)); + } + + #[test] + pub fn test_wrr_balancer_0() { + #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] + struct Blah { + value: u32, + } + let b1 = Blah { value: 0 }; + let b2 = Blah { value: 1 }; + + let ab1 = Arc::new(b1); + let ab2 = Arc::new(b2); + let items = [Arc::clone(&ab1), Arc::clone(&ab2)]; + + let lb_items = [LbItem::new(1, ab1), LbItem::new(1, ab2)]; + + let mut wrr = WeightedRoundRobinBalancer::new(lb_items); + let mut selected_items = vec![]; + for _n in 0..7 { + selected_items.push(wrr.next_item(None)); + } + + compare_rotated(selected_items, items.into_iter().map(Some)); + } + + #[test] + pub fn test_wrr_balancer_1() { + #[derive(Debug, Clone, PartialEq)] + struct Blah { + value: u32, + } + let b1 = Blah { value: 0 }; + let b2 = Blah { value: 1 }; + let b3 = Blah { value: 2 }; + + let ab1 = Arc::new(b1); + let ab2 = Arc::new(b2); + let ab3 = Arc::new(b3); + + let items = [Arc::clone(&ab1), Arc::clone(&ab2), Arc::clone(&ab3)]; + + let lb_items = [LbItem::new(1, ab1), LbItem::new(1, ab2), LbItem::new(1, ab3)]; + + let mut wrr = WeightedRoundRobinBalancer::new(lb_items); + let mut selected_items = vec![]; + for _n in 0..10 { + selected_items.push(wrr.next_item(None)); + } + + println!("Selected: {selected_items:?}"); + + compare_rotated(selected_items, items.into_iter().map(Some)); + } + + #[test] + pub fn test_wrr_balancer_2() { + let mut items = vec![]; + let mut counts = vec![]; + for n in 0..3 { + items.push(LbItem::new(1, Arc::new(n))); + counts.push(0); + } + let item = &mut items[2]; + item.weight = 2; + + let mut 
wrr = WeightedRoundRobinBalancer::new(items); + let mut selected_items = vec![]; + for _n in 0..20 { + selected_items.push(wrr.next_item(None)); + } + let mut selected_items: Vec<_> = selected_items.into_iter().flatten().collect(); + selected_items.sort_unstable(); + + for i in selected_items { + counts[*i] += 1; + } + assert_eq!(counts, vec![5, 5, 10]); + } + + #[test] + pub fn test_wrr_balancer_3() { + let mut items = vec![]; + let mut counts = vec![]; + for n in 0..3 { + items.push(LbItem::new(1, Arc::new(n))); + counts.push(0); + } + let item = &mut items[1]; + item.weight = 2; + let item = &mut items[2]; + item.weight = 4; + + let mut wrr = WeightedRoundRobinBalancer::new(items); + let mut selected_items = vec![]; + for _n in 0..20 { + selected_items.push(wrr.next_item(None)); + } + + for i in selected_items.into_iter().flatten() { + counts[*i] += 1; + } + + assert_eq!(counts, vec![3, 6, 11]); + } + + #[test] + /// Just make sure that the balancer does not panic. + fn test_wrr_balancer_weight_overflow() { + let items = [ + LbItem::new(u32::MAX / 2, Arc::new(0)), + LbItem::new(u32::MAX / 2, Arc::new(1)), + LbItem::new(u32::MAX / 2, Arc::new(2)), + ]; + let mut wrr = WeightedRoundRobinBalancer::new(items); + for _ in 0..20 { + // Not checking which items have been chosen, as the arithmetic conversions distort the algorithm too much. + // Just make sure something is chosen. + assert!(wrr.next_item(None).is_some(), "expected balancer to select one item"); + } + } +} diff --git a/orion-lib/src/clusters/cached_watch.rs b/orion-lib/src/clusters/cached_watch.rs new file mode 100644 index 00000000..298a9af8 --- /dev/null +++ b/orion-lib/src/clusters/cached_watch.rs @@ -0,0 +1,86 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+use parking_lot::RwLock;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+pub struct CachedWatch<T> {
+    value: RwLock<T>,
+    version: AtomicUsize,
+}
+
+impl<T: Clone> CachedWatch<T> {
+    pub const fn new(value: T) -> Self {
+        Self { value: RwLock::new(value), version: AtomicUsize::new(0) }
+    }
+
+    pub fn version(&self) -> usize {
+        self.version.load(Ordering::Relaxed)
+    }
+
+    #[allow(unused)]
+    pub fn set(&self, value: T) {
+        self.update(move |current| *current = value)
+    }
+
+    pub fn update<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+        let mut w_lock = self.value.write();
+        let ret = f(&mut w_lock);
+        self.version.fetch_add(1, Ordering::Relaxed);
+        ret
+    }
+
+    pub fn get_clone(&self) -> (T, usize) {
+        let r_lock = self.value.read();
+        let value = r_lock.clone();
+        let version = self.version();
+        (value, version)
+    }
+
+    pub fn watcher(&self) -> CachedWatcher<'_, T> {
+        let (local, version) = self.get_clone();
+        CachedWatcher { parent: self, version, local }
+    }
+}
+
+pub struct CachedWatcher<'a, T: Clone> {
+    parent: &'a CachedWatch<T>,
+    version: usize,
+    local: T,
+}
+
+impl<'a, T: Clone> CachedWatcher<'a, T> {
+    pub fn cached_or_latest(&mut self) -> &mut T {
+        let parent_version = self.parent.version();
+        if parent_version != self.version {
+            // This read lock may fail if the parent is being updated again; in that case
+            // we keep using the old version, on the assumption that updates are infrequent
+            // with respect to reads. We could add logic here to stall and force a refresh
+            // after too many failed attempts, but be mindful that some RwLock
+            // implementations always let writers go first, so we could still be starved of
+            // updates unless the parent also waits and lets us read before granting
+            // another write lock.
+            if let Some(r_lock) = self.parent.value.try_read() {
+                self.local = r_lock.clone();
+                self.version = self.parent.version();
+            }
+        }
+        &mut self.local
+    }
+}
diff --git a/orion-lib/src/clusters/cluster.rs b/orion-lib/src/clusters/cluster.rs
new file mode 100644
index 00000000..c69dca8d
--- /dev/null
+++ b/orion-lib/src/clusters/cluster.rs
@@ -0,0 +1,446 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +// + +use compact_str::{CompactString, ToCompactString}; +use enum_dispatch::enum_dispatch; +use futures::future::BoxFuture; +use http::uri::Authority; + +use orion_configuration::config::cluster::ClusterDiscoveryType; +use orion_configuration::config::cluster::{Cluster as ClusterConfig, HealthCheck, LbPolicy}; +use rustls::ClientConfig; +use tokio::net::TcpStream; +use tracing::debug; +use webpki::types::ServerName; + +use super::balancers::hash_policy::HashState; +use super::{health::HealthStatus, load_assignment::ClusterLoadAssignment}; +use crate::clusters::load_assignment::PartialClusterLoadAssignment; +use crate::transport::{GrpcService, HttpChannel}; +use crate::{ + clusters::load_assignment::ClusterLoadAssignmentBuilder, + secrets::{TlsConfigurator, TransportSecret, WantsToBuildClient}, + transport::{bind_device::BindDevice, connector::ConnectError, TcpChannel}, + Error, Result, SecretManager, +}; + +pub type TcpService = BoxFuture<'static, std::result::Result>; + +#[derive(Debug, Clone)] +pub struct StaticCluster { + pub name: CompactString, + pub load_assignment: ClusterLoadAssignment, + pub tls_configurator: Option>, + pub health_check: Option, +} + +#[derive(Debug, Clone)] +pub struct DynamicCluster { + pub name: CompactString, + pub bind_device: Option, + load_assignment: Option, + pub tls_configurator: Option>, + pub health_check: Option, + pub load_balancing_policy: LbPolicy, +} + +#[derive(Debug, Clone)] +pub struct StaticClusterBuilder { + pub name: CompactString, + pub load_assignment: ClusterLoadAssignmentBuilder, + pub tls_configurator: Option>, + pub health_check: Option, +} + +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct DynamicClusterBuilder { + pub name: CompactString, + pub bind_device: Option, + pub tls_configurator: Option>, + pub health_check: Option, + pub load_balancing_policy: LbPolicy, +} + +impl StaticClusterBuilder { + fn build(self) -> Result { + let StaticClusterBuilder { name, load_assignment, tls_configurator, health_check } = self; + let load_assignment = load_assignment.build()?; + Ok(ClusterType::Static(StaticCluster { name, load_assignment, tls_configurator, health_check })) + } +} + +impl DynamicClusterBuilder { + fn build(self) -> ClusterType { + let DynamicClusterBuilder { name, tls_configurator, health_check, load_balancing_policy, bind_device } = self; + ClusterType::Dynamic(DynamicCluster { + name, + load_assignment: None, + tls_configurator, + health_check, + load_balancing_policy, + bind_device, + }) + } +} + +impl TryFrom<(ClusterConfig, &SecretManager)> for PartialClusterType { + type Error = Error; + fn try_from(value: (ClusterConfig, &SecretManager)) -> std::result::Result { + let (cluster, secrets) = value; + let upstream_tls_context = cluster.tls_config; + let bind_device = cluster.bind_device; + let load_balancing_policy = cluster.load_balancing_policy; + let protocol_options = cluster.http_protocol_options; + + let cluster_tls_configurator = if let Some(upstream_tls_context) = upstream_tls_context { + Some(TlsConfigurator::::try_from((upstream_tls_context, secrets))?) 
+ } else { + None + }; + + let health_check = cluster.health_check; + debug!("Cluster {} type {:?} ", cluster.name, cluster.discovery_settings); + match cluster.discovery_settings { + ClusterDiscoveryType::Static(cla) => { + let server_name = cluster_tls_configurator + .as_ref() + .map(|tls_configurator| ServerName::try_from(tls_configurator.sni())) + .transpose()?; + + let cla = ClusterLoadAssignmentBuilder::builder() + .with_cla(PartialClusterLoadAssignment::try_from(cla)?) + .with_cluster_name(cluster.name.clone()) + .with_bind_device(bind_device) + .with_lb_policy(load_balancing_policy) + .with_connection_timeout(cluster.connect_timeout) + .with_tls_configurator(cluster_tls_configurator.clone()) + .with_server_name(server_name) + .with_protocol_options(Some(protocol_options)) + .prepare(); + + Ok(PartialClusterType::Static(StaticClusterBuilder { + name: cluster.name.to_compact_string(), + load_assignment: cla, + tls_configurator: cluster_tls_configurator, + health_check, + })) + }, + ClusterDiscoveryType::Eds => Ok(PartialClusterType::Dynamic(DynamicClusterBuilder { + name: cluster.name.to_compact_string(), + bind_device, + tls_configurator: cluster_tls_configurator, + health_check, + load_balancing_policy, + })), + } + } +} + +#[enum_dispatch] +pub trait ClusterOps { + fn get_name(&self) -> &CompactString; + fn into_health_check(self) -> Option; + fn all_http_channels(&self) -> Vec<(Authority, HttpChannel)>; + fn all_tcp_channels(&self) -> Vec<(Authority, TcpChannel)>; + fn all_grpc_channels(&self) -> Vec>; + fn change_tls_context(&mut self, secret_id: &str, secret: TransportSecret) -> Result<()>; + fn update_health(&mut self, endpoint: &http::uri::Authority, health: HealthStatus); + fn get_http_connection(&mut self, lb_hash: HashState) -> Result; + fn get_tcp_connection(&mut self) -> Result>>; + fn get_grpc_connection(&mut self) -> Result; +} + +#[derive(Debug, Clone)] +#[enum_dispatch(ClusterOps)] +pub enum ClusterType { + Static(StaticCluster), + Dynamic(DynamicCluster), +} + +#[derive(Debug, Clone)] +pub enum PartialClusterType { + Static(StaticClusterBuilder), + Dynamic(DynamicClusterBuilder), +} + +impl PartialClusterType { + pub fn build(self) -> Result { + match self { + Self::Static(cluster_builder) => cluster_builder.build(), + Self::Dynamic(cluster_builder) => Ok(cluster_builder.build()), + } + } + + pub fn get_name(&self) -> &CompactString { + match &self { + PartialClusterType::Static(cluster) => &cluster.name, + PartialClusterType::Dynamic(cluster) => &cluster.name, + } + } + + pub fn into_health_check(self) -> Option { + match self { + PartialClusterType::Static(cluster) => cluster.health_check, + PartialClusterType::Dynamic(cluster) => cluster.health_check, + } + } +} + +impl DynamicCluster { + pub fn change_load_assignment(&mut self, cluster_load_assignment: Option) { + self.load_assignment = cluster_load_assignment; + } +} + +impl ClusterOps for DynamicCluster { + fn get_name(&self) -> &CompactString { + &self.name + } + + fn into_health_check(self) -> Option { + self.health_check + } + + fn all_http_channels(&self) -> Vec<(Authority, HttpChannel)> { + self.load_assignment.as_ref().map_or(Vec::new(), ClusterLoadAssignment::all_http_channels) + } + + fn all_tcp_channels(&self) -> Vec<(Authority, TcpChannel)> { + self.load_assignment.as_ref().map_or(Vec::new(), ClusterLoadAssignment::all_tcp_channels) + } + + fn all_grpc_channels(&self) -> Vec> { + self.load_assignment.as_ref().map_or(Vec::new(), ClusterLoadAssignment::try_all_grpc_channels) + } + + fn 
change_tls_context(&mut self, secret_id: &str, secret: TransportSecret) -> Result<()> { + if let Some(tls_configurator) = self.tls_configurator.clone() { + let tls_configurator = + TlsConfigurator::::update(tls_configurator, secret_id, secret)?; + if let Some(mut load_assignment) = self.load_assignment.take() { + load_assignment.tls_configurator = Some(tls_configurator.clone()); + let load_assignment = load_assignment.rebuild()?; + self.load_assignment = Some(load_assignment); + }; + self.tls_configurator = Some(tls_configurator); + } + Ok(()) + } + + fn update_health(&mut self, endpoint: &http::uri::Authority, health: HealthStatus) { + if let Some(load_assignment) = self.load_assignment.as_mut() { + load_assignment.update_endpoint_health(endpoint, health); + } + } + + fn get_http_connection(&mut self, lb_hash: HashState) -> Result { + if let Some(cla) = self.load_assignment.as_mut() { + cla.get_http_channel(lb_hash) + } else { + Err(format!("{} No channels available", self.name).into()) + } + } + + fn get_tcp_connection(&mut self) -> Result>> { + if let Some(cla) = self.load_assignment.as_mut() { + cla.get_tcp_channel() + } else { + Err(format!("{} No channels available", self.name).into()) + } + } + + fn get_grpc_connection(&mut self) -> Result { + if let Some(cla) = self.load_assignment.as_mut() { + cla.get_grpc_channel() + } else { + Err(format!("{} No channels available", self.name).into()) + } + } +} + +impl ClusterOps for StaticCluster { + fn get_name(&self) -> &CompactString { + &self.name + } + + fn into_health_check(self) -> Option { + self.health_check + } + + fn all_http_channels(&self) -> Vec<(Authority, HttpChannel)> { + self.load_assignment.all_http_channels() + } + + fn all_tcp_channels(&self) -> Vec<(Authority, TcpChannel)> { + self.load_assignment.all_tcp_channels() + } + + fn all_grpc_channels(&self) -> Vec> { + self.load_assignment.try_all_grpc_channels() + } + + fn change_tls_context(&mut self, secret_id: &str, secret: TransportSecret) -> Result<()> { + if let Some(tls_configurator) = self.tls_configurator.clone() { + let tls_configurator = + TlsConfigurator::::update(tls_configurator, secret_id, secret)?; + let mut load_assignment = self.load_assignment.clone(); + load_assignment.tls_configurator = Some(tls_configurator.clone()); + let load_assignment = load_assignment.rebuild()?; + self.load_assignment = load_assignment; + self.tls_configurator = Some(tls_configurator); + } + Ok(()) + } + + fn update_health(&mut self, endpoint: &http::uri::Authority, health: HealthStatus) { + self.load_assignment.update_endpoint_health(endpoint, health); + } + + fn get_http_connection(&mut self, lb_hash: HashState) -> Result { + debug!("{} : Getting connection", self.name); + self.load_assignment.get_http_channel(lb_hash) + } + + fn get_tcp_connection(&mut self) -> Result>> { + self.load_assignment.get_tcp_channel() + } + + fn get_grpc_connection(&mut self) -> Result { + self.load_assignment.get_grpc_channel() + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use orion_data_plane_api::decode::from_yaml; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::cluster::v3::Cluster as EnvoyCluster; + + use super::*; + + fn check_bind_device(c: &ClusterType, device_name: &str) { + let expected_bind_device = Some(BindDevice::from_str(device_name).unwrap()); + + let cla = match c { + ClusterType::Static(s) => Some(&s.load_assignment), + ClusterType::Dynamic(d) => { + assert_eq!(&d.bind_device, &expected_bind_device); + d.load_assignment.as_ref() + }, + }; + + if let 
Some(load_assignment) = cla { + for lep in &load_assignment.endpoints { + for ep in &lep.endpoints { + assert_eq!(ep.bind_device, expected_bind_device); + } + } + } + } + + #[test] + fn static_cluster_upstream_bind_device() { + const CLUSTER: &str = r#" +name: cluster1 +type: STATIC +upstream_bind_config: + socket_options: + - description: "bind to interface virt1" + level: 1 + name: 25 + # utf8 string 'virt1' bytes encoded as base64 + buf_value: dmlydDE= +load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 192.168.2.10 + port_value: 80 +"#; + + let secrets_man = SecretManager::new(); + let envoy_cluster: EnvoyCluster = from_yaml(CLUSTER).unwrap(); + let cluster = ClusterConfig::try_from(envoy_cluster).unwrap(); + let c = PartialClusterType::try_from((cluster, &secrets_man)).unwrap(); + let c = c.build().unwrap(); + + check_bind_device(&c, "virt1"); + } + + #[test] + fn eds_cluster_upstream_bind_device() { + const CLUSTER: &str = r#" +name: cluster1 +type: EDS +upstream_bind_config: + socket_options: + - description: "bind to interface virt1" + level: 1 + name: 25 + # utf8 string 'virt1' bytes encoded as base64 + buf_value: dmlydDE= +"#; + + let secrets_man = SecretManager::new(); + let envoy_cluster: EnvoyCluster = from_yaml(CLUSTER).unwrap(); + let cluster = ClusterConfig::try_from(envoy_cluster).unwrap(); + let c = PartialClusterType::try_from((cluster, &secrets_man)).unwrap(); + println!("{c:#?}"); + let c = c.build().unwrap(); + check_bind_device(&c, "virt1"); + } + + #[test] + fn cluster_2_health_check_not_supported() { + const CLUSTER: &str = r#" +name: cluster1 +type: EDS +health_checks: +- timeout: 0.1s + interval: 5s + healthy_threshold: "3" + unhealthy_threshold: "2" + http_health_check: + path: /health +- timeout: 0.1s + interval: 5s + healthy_threshold: "3" + unhealthy_threshold: "2" + http_health_check: + path: /health +load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 192.168.2.10 + port_value: 80 +"#; + + let envoy_cluster: EnvoyCluster = from_yaml(CLUSTER).unwrap(); + assert_eq!(envoy_cluster.health_checks.len(), 2); + let _ = ClusterConfig::try_from(envoy_cluster).unwrap_err(); + } +} diff --git a/orion-lib/src/clusters/clusters_manager.rs b/orion-lib/src/clusters/clusters_manager.rs new file mode 100644 index 00000000..27265b06 --- /dev/null +++ b/orion-lib/src/clusters/clusters_manager.rs @@ -0,0 +1,191 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+use super::{
+    balancers::hash_policy::HashState,
+    cached_watch::{CachedWatch, CachedWatcher},
+    cluster::{ClusterType, TcpService},
+    health::HealthStatus,
+    load_assignment::{ClusterLoadAssignmentBuilder, PartialClusterLoadAssignment},
+};
+use crate::clusters::cluster::ClusterOps;
+use crate::transport::{GrpcService, TcpChannel};
+use crate::PartialClusterType;
+use crate::Result;
+use crate::{secrets::TransportSecret, transport::HttpChannel};
+use compact_str::CompactString;
+use http::uri::Authority;
+use orion_configuration::config::cluster::ClusterSpecifier as ClusterSpecifierConfig;
+use rand::prelude::SliceRandom;
+use rand::thread_rng;
+use std::cell::RefCell;
+use std::collections::{btree_map::Entry as BTreeEntry, BTreeMap};
+use tracing::{debug, warn};
+
+type ClustersMap = BTreeMap<CompactString, ClusterType>;
+
+static CLUSTERS_MAP: CachedWatch<ClustersMap> = CachedWatch::new(ClustersMap::new());
+
+thread_local! {
+    static CLUSTERS_MAP_CACHE: RefCell<CachedWatcher<'static, ClustersMap>> = RefCell::new(CLUSTERS_MAP.watcher());
+}
+
+pub fn change_cluster_load_assignment(name: &str, cla: &PartialClusterLoadAssignment) -> Result<ClusterType> {
+    CLUSTERS_MAP.update(|current| {
+        if let Some(cluster) = current.get_mut(name) {
+            match cluster {
+                ClusterType::Dynamic(dynamic_cluster) => {
+                    let cla = ClusterLoadAssignmentBuilder::builder()
+                        .with_cla(cla.clone())
+                        .with_tls_configurator(dynamic_cluster.tls_configurator.clone())
+                        .with_cluster_name(dynamic_cluster.name.clone())
+                        .with_bind_device(dynamic_cluster.bind_device.clone())
+                        .with_lb_policy(dynamic_cluster.load_balancing_policy)
+                        .prepare();
+                    cla.build().map(|cla| dynamic_cluster.change_load_assignment(Some(cla)))?;
+                    Ok(cluster.clone())
+                },
+                ClusterType::Static(_) => {
+                    let msg = format!("{name} Attempt to change CLA for static cluster");
+                    warn!(msg);
+                    Err(msg.into())
+                },
+            }
+        } else {
+            let msg = format!("{name} No cluster found");
+            warn!(msg);
+            Err(msg.into())
+        }
+    })
+}
+
+pub fn remove_cluster_load_assignment(name: &str) -> Result<()> {
+    CLUSTERS_MAP.update(|current| {
+        let maybe_cluster = current.get_mut(name);
+        if let Some(cluster) = maybe_cluster {
+            match cluster {
+                ClusterType::Dynamic(cluster) => {
+                    cluster.change_load_assignment(None);
+                    Ok(())
+                },
+                ClusterType::Static(_) => {
+                    let msg = format!("{name} Attempt to change CLA for static cluster");
+                    warn!(msg);
+                    Err(msg.into())
+                },
+            }
+        } else {
+            let msg = format!("{name} No cluster found");
+            warn!(msg);
+            Err(msg.into())
+        }
+    })
+}
+
+pub fn update_endpoint_health(cluster: &str, endpoint: &Authority, health: HealthStatus) {
+    CLUSTERS_MAP.update(|current| {
+        if let Some(cluster) = current.get_mut(cluster) {
+            cluster.update_health(endpoint, health);
+        }
+    });
+}
+
+pub fn update_tls_context(secret_id: &str, secret: &TransportSecret) -> Result<Vec<ClusterType>> {
+    CLUSTERS_MAP.update(|current| {
+        let mut cluster_configs = Vec::with_capacity(current.len());
+        for cluster in current.values_mut() {
+            cluster.change_tls_context(secret_id, secret.clone())?;
+            cluster_configs.push(cluster.clone());
+        }
+        Ok(cluster_configs)
+    })
+}
+
+pub fn add_cluster(partial_cluster: PartialClusterType) -> Result<ClusterType> {
+    let cluster = partial_cluster.build()?;
+    let cluster_name = cluster.get_name().clone();
+
+    CLUSTERS_MAP.update(|current| match current.entry(cluster_name) {
+        BTreeEntry::Vacant(entry) => {
+            entry.insert(cluster.clone());
+            Ok(cluster)
+        },
+        BTreeEntry::Occupied(entry) => {
+            let cluster_name = entry.key();
+            Err(format!("Cluster {cluster_name} already exists... 
need to remove it first").into()) + }, + }) +} + +pub fn remove_cluster(cluster_name: &str) -> Result<()> { + CLUSTERS_MAP.update(|current| current.remove(cluster_name).map(|_| ()).ok_or("No such cluster".into())) +} + +pub fn get_http_connection(selector: &ClusterSpecifierConfig, lb_hash: HashState) -> Result { + debug!("Http connection for {selector:?}"); + with_cluster_selector(selector, |cluster| cluster.get_http_connection(lb_hash)) +} + +pub fn get_tcp_connection(selector: &ClusterSpecifierConfig) -> Result { + with_cluster_selector(selector, ClusterOps::get_tcp_connection) +} + +pub fn get_grpc_connection(selector: &ClusterSpecifierConfig) -> Result { + with_cluster_selector(selector, ClusterOps::get_grpc_connection) +} + +pub fn all_http_connections(cluster_name: &str) -> Result> { + with_cluster(cluster_name, |cluster| Ok(cluster.all_http_channels())) +} + +pub fn all_tcp_connections(cluster_name: &str) -> Result> { + with_cluster(cluster_name, |cluster| Ok(cluster.all_tcp_channels())) +} + +pub fn all_grpc_connections(cluster_name: &str) -> Result>> { + with_cluster(cluster_name, |cluster| Ok(cluster.all_grpc_channels())) +} + +fn with_cluster_selector(selector: &ClusterSpecifierConfig, f: F) -> Result +where + F: FnOnce(&mut ClusterType) -> Result, +{ + let cluster_name = match selector { + ClusterSpecifierConfig::Cluster(cluster_name) => cluster_name, + ClusterSpecifierConfig::WeightedCluster(weighted_clusters) => { + &weighted_clusters.choose_weighted(&mut thread_rng(), |cluster| u32::from(cluster.weight))?.cluster + }, + }; + + with_cluster(cluster_name, f) +} + +fn with_cluster(cluster_name: &str, f: F) -> Result +where + F: FnOnce(&mut ClusterType) -> Result, +{ + CLUSTERS_MAP_CACHE.with_borrow_mut(|watcher| { + if let Some(cluster) = watcher.cached_or_latest().get_mut(cluster_name) { + f(cluster) + } else { + Err(format!("Cluster {cluster_name} not found").into()) + } + }) +} diff --git a/orion-lib/src/clusters/health/checkers/checker.rs b/orion-lib/src/clusters/health/checkers/checker.rs new file mode 100644 index 00000000..d06acbcc --- /dev/null +++ b/orion-lib/src/clusters/health/checkers/checker.rs @@ -0,0 +1,235 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+use std::future::Future;
+use std::{sync::Arc, time::Duration};
+
+use orion_configuration::config::cluster::{health_check::ClusterHealthCheck, HealthStatus};
+use pingora_timeout::fast_timeout::fast_timeout;
+use rand::Rng;
+use rand::{distributions::Uniform, thread_rng};
+use tokio::sync::mpsc;
+use tokio::task::JoinHandle;
+use tokio::{select, sync::Notify};
+
+use crate::clusters::health::counter::HealthStatusCounter;
+use crate::clusters::health::{EndpointHealthUpdate, EndpointId};
+use crate::Error;
+
+use super::CurrentHealthStatus;
+
+pub struct HealthCheckerLoop<W, P> {
+    endpoint: EndpointId,
+    cluster_config: ClusterHealthCheck,
+    sender: mpsc::Sender<EndpointHealthUpdate>,
+    stop_signal: Arc<Notify>,
+    interval_waiter: W,
+    checker: P,
+}
+
+impl<W, P> HealthCheckerLoop<W, P>
+where
+    W: WaitInterval + Send + 'static,
+    P: ProtocolChecker + Send + 'static,
+    P::Response: Send,
+{
+    pub fn new(
+        endpoint: EndpointId,
+        cluster_config: ClusterHealthCheck,
+        sender: mpsc::Sender<EndpointHealthUpdate>,
+        stop_signal: Arc<Notify>,
+        interval_waiter: W,
+        checker: P,
+    ) -> Self {
+        Self { endpoint, cluster_config, sender, stop_signal, interval_waiter, checker }
+    }
+
+    pub fn spawn(self) -> JoinHandle<Result<(), Error>> {
+        tokio::spawn(self.run())
+    }
+
+    async fn run(mut self) -> Result<(), Error> {
+        let mut health_status =
+            HealthStatusCounter::new(self.cluster_config.healthy_threshold, self.cluster_config.unhealthy_threshold);
+
+        // Initial jitter
+        if wait_was_cancelled_opt(self.cluster_config.initial_jitter.map(get_random_duration), &self.stop_signal).await
+        {
+            return Ok(());
+        }
+
+        loop {
+            tracing::debug!("Sending health check to {:?}", self.endpoint.endpoint);
+
+            // Wait for the response or cancellation
+
+            let check_result = select! {
+                () = self.stop_signal.notified() => HealthCheckResult::Cancelled,
+                result = fast_timeout(self.cluster_config.timeout, self.checker.check()) => {
+                    if let Ok(response) = result {
+                        HealthCheckResult::Response(response)
+                    } else {
+                        HealthCheckResult::Timeout
+                    }
+                }
+            };
+
+            let health_status_change = match check_result {
+                HealthCheckResult::Timeout => {
+                    tracing::debug!(
+                        "Response for {:?} in cluster {}: timeout",
+                        self.endpoint.endpoint,
+                        self.endpoint.cluster
+                    );
+                    health_status.add_failure()
+                },
+                HealthCheckResult::Response(Err(err)) => {
+                    tracing::debug!(
+                        "Response for {:?} in cluster {}: error {}",
+                        self.endpoint.endpoint,
+                        self.endpoint.cluster,
+                        err
+                    );
+                    health_status.add_failure()
+                },
+                HealthCheckResult::Response(Ok(response)) => {
+                    self.checker.process_response(&self.endpoint, &mut health_status, &response)
+                },
+                HealthCheckResult::Cancelled => {
+                    tracing::debug!(
+                        "Stopping checks of endpoint {:?} in cluster {:?}",
+                        self.endpoint.endpoint,
+                        self.endpoint.cluster
+                    );
+                    return Ok(());
+                },
+            };
+
+            let _ = self
+                .sender
+                .send(EndpointHealthUpdate {
+                    endpoint: self.endpoint.clone(),
+                    health: health_status.status().unwrap_or_default(),
+                    changed: health_status_change.is_some(),
+                })
+                .await;
+
+            if self
+                .interval_waiter
+                .wait_interval_was_cancelled(
+                    &self.cluster_config,
+                    match health_status_change {
+                        Some(new_status) => CurrentHealthStatus::Edge(new_status),
+                        None => CurrentHealthStatus::Unchanged(health_status.status()),
+                    },
+                    &self.stop_signal,
+                )
+                .await
+            {
+                return Ok(());
+            }
+        }
+    }
+}
+
+pub trait ProtocolChecker {
+    type Response;
+    fn check(&mut self) -> impl Future<Output = Result<Self::Response, Error>> + Send;
+    fn process_response(
+        &self,
+        endpoint: &EndpointId,
+        counter: &mut HealthStatusCounter,
+        response: &Self::Response,
+    ) -> Option<HealthStatus>;
+}
+
+pub enum HealthCheckResult<R> {
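+    /// The endpoint replied within the timeout; the inner `Result` may still carry a protocol error.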
+    Response(Result<Response, Error>),
+    Timeout,
+    Cancelled,
+}
+
+pub trait WaitInterval {
+    /// Wait for the next interval of this checker, based on the configuration and the
+    /// current health status. Returns `true` if it was cancelled during the wait.
+    fn wait_interval_was_cancelled(
+        &self,
+        config: &ClusterHealthCheck,
+        health_status: CurrentHealthStatus,
+        stop_signal: &Notify,
+    ) -> impl Future<Output = bool> + Send;
+}
+
+pub struct IntervalWaiter;
+
+impl WaitInterval for IntervalWaiter {
+    async fn wait_interval_was_cancelled(
+        &self,
+        config: &ClusterHealthCheck,
+        health_status: CurrentHealthStatus,
+        stop_signal: &Notify,
+    ) -> bool {
+        // 1. Base interval
+        let mut interval = match health_status {
+            CurrentHealthStatus::Edge(new_health_status) => {
+                // a) Edge interval (transition from one status to another)
+                match new_health_status {
+                    HealthStatus::Healthy => config.healthy_edge_interval,
+                    HealthStatus::Unhealthy => config.unhealthy_edge_interval,
+                }
+                .unwrap_or(config.interval)
+            },
+            CurrentHealthStatus::Unchanged(Some(HealthStatus::Unhealthy)) => {
+                // b) Same status interval
+                config.unhealthy_interval.unwrap_or(config.interval)
+            },
+            CurrentHealthStatus::Unchanged(_) => config.interval,
+        };
+
+        // 2. Add interval jitter
+        if let Some(interval_jitter) = config.interval_jitter {
+            interval += interval_jitter;
+        }
+
+        // 3. Add interval jitter percent
+        interval += interval.mul_f32(config.interval_jitter_percent);
+
+        wait_was_cancelled(interval, stop_signal).await
+    }
+}
+
+/// Wait for the `interval`. Returns `true` if it was cancelled.
+async fn wait_was_cancelled(interval: Duration, stop_signal: &Notify) -> bool {
+    // The wait was cancelled iff `notified()` completed before the timeout elapsed.
+    fast_timeout(interval, stop_signal.notified()).await.is_ok()
+}
+
+/// If the option has a value, wait for the `interval`. If the option is empty, return immediately.
+/// Returns `true` if it was cancelled during the wait.
+async fn wait_was_cancelled_opt(interval: Option<Duration>, stop_signal: &Notify) -> bool {
+    if let Some(interval) = interval {
+        wait_was_cancelled(interval, stop_signal).await
+    } else {
+        false
+    }
+}
+
+fn get_random_duration(max: Duration) -> Duration {
+    thread_rng().sample(Uniform::new_inclusive(Duration::from_secs(0), max))
+}
diff --git a/orion-lib/src/clusters/health/checkers/grpc/mod.rs b/orion-lib/src/clusters/health/checkers/grpc/mod.rs
new file mode 100644
index 00000000..30b7f97e
--- /dev/null
+++ b/orion-lib/src/clusters/health/checkers/grpc/mod.rs
@@ -0,0 +1,151 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+#[cfg(test)]
+mod tests;
+
+use std::sync::Arc;
+
+use futures::future::BoxFuture;
+use futures::FutureExt;
+use orion_configuration::config::cluster::health_check::{ClusterHealthCheck, GrpcHealthCheck};
+use orion_xds::grpc_deps::tonic_health::pb::health_client::HealthClient;
+use orion_xds::grpc_deps::tonic_health::pb::{
+    health_check_response::ServingStatus, HealthCheckRequest, HealthCheckResponse,
+};
+use orion_xds::grpc_deps::{Response as TonicResponse, Status as TonicStatus};
+use tokio::sync::{mpsc, Notify};
+use tokio::task::JoinHandle;
+
+use super::checker::{IntervalWaiter, ProtocolChecker, WaitInterval};
+use crate::clusters::health::checkers::checker::HealthCheckerLoop;
+use crate::clusters::health::counter::HealthStatusCounter;
+use crate::clusters::health::{EndpointHealthUpdate, EndpointId, HealthStatus};
+use crate::transport::GrpcService;
+use crate::Error;
+
+/// Spawns a gRPC health checker and returns its handle. Must be called from a Tokio runtime context.
+pub fn spawn_grpc_health_checker(
+    endpoint: EndpointId,
+    cluster_config: ClusterHealthCheck,
+    protocol_config: GrpcHealthCheck,
+    channel: GrpcService,
+    sender: mpsc::Sender<EndpointHealthUpdate>,
+    stop_signal: Arc<Notify>,
+) -> JoinHandle<Result<(), Error>> {
+    let interval_waiter = IntervalWaiter;
+    spawn_grpc_health_checker_impl(
+        endpoint,
+        cluster_config,
+        protocol_config,
+        sender,
+        stop_signal,
+        (HealthClient::new(channel), interval_waiter),
+    )
+}
+
+trait GrpcHealthChannel {
+    fn check(
+        &mut self,
+        request: HealthCheckRequest,
+    ) -> BoxFuture<'_, Result<TonicResponse<HealthCheckResponse>, TonicStatus>>;
+}
+
+impl GrpcHealthChannel for HealthClient<GrpcService> {
+    fn check(
+        &mut self,
+        request: HealthCheckRequest,
+    ) -> BoxFuture<'_, Result<TonicResponse<HealthCheckResponse>, TonicStatus>> {
+        HealthClient::check(self, request).boxed()
+    }
+}
+
+/// Actual implementation of `spawn_grpc_health_checker()`, with `dependencies` containing the
+/// injected gRPC stack builder and interval waiter.
+fn spawn_grpc_health_checker_impl<G, W>(
+    endpoint: EndpointId,
+    cluster_config: ClusterHealthCheck,
+    protocol_config: GrpcHealthCheck,
+    sender: mpsc::Sender<EndpointHealthUpdate>,
+    stop_signal: Arc<Notify>,
+    dependencies: (G, W),
+) -> JoinHandle<Result<(), Error>>
+where
+    G: GrpcHealthChannel + Send + 'static,
+    W: WaitInterval + Send + 'static,
+{
+    tracing::debug!(
+        "Starting gRPC health checks of endpoint {:?} in cluster {:?}",
+        endpoint.endpoint,
+        endpoint.cluster
+    );
+
+    let (grpc_client, interval_waiter) = dependencies;
+
+    let grpc_checker = GrpcChecker { channel: grpc_client, config: protocol_config };
+
+    let check_loop =
+        HealthCheckerLoop::new(endpoint, cluster_config, sender, stop_signal, interval_waiter, grpc_checker);
+
+    check_loop.spawn()
+}
+
+struct GrpcChecker<G> {
+    channel: G,
+    config: GrpcHealthCheck,
+}
+
+impl<G> ProtocolChecker for GrpcChecker<G>
+where
+    G: GrpcHealthChannel + Send,
+{
+    type Response = HealthCheckResponse;
+
+    fn check(
+        &mut self,
+    ) -> impl futures::Future<Output = Result<Self::Response, orion_error::Error>> + std::marker::Send {
+        async move {
+            let request = HealthCheckRequest { service: self.config.service_name.clone().into() };
+            Ok(self.channel.check(request).await.map(TonicResponse::into_inner)?)
+        }
+        .boxed()
+    }
+
+    fn process_response(
+        &self,
+        endpoint: &EndpointId,
+        counter: &mut HealthStatusCounter,
+        response: &Self::Response,
+    ) -> Option<HealthStatus> {
+        match response.status() {
+            status @ (ServingStatus::Unknown | ServingStatus::NotServing | ServingStatus::ServiceUnknown) => {
+                tracing::debug!(
+                    "Failed health check of {:?} in cluster {}: {}",
+                    endpoint.endpoint,
+                    endpoint.cluster,
+                    status.as_str_name(),
+                );
+                counter.add_failure()
+            },
+            ServingStatus::Serving => counter.add_success(),
+        }
+    }
+}
diff --git a/orion-lib/src/clusters/health/checkers/grpc/tests.rs b/orion-lib/src/clusters/health/checkers/grpc/tests.rs
new file mode 100644
index 00000000..59cb57ca
--- /dev/null
+++ b/orion-lib/src/clusters/health/checkers/grpc/tests.rs
@@ -0,0 +1,164 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+/*
+ * The test fixture constructs a mock gRPC stack, which inspects the requests and
+ * reports them to the test cases. A queue of responses is used to reply to all
+ * the requests that arrive from the health checker. No actual gRPC or TCP
+ * connections are made. It's a bit more code, but worth it in the long run.
+ */
+
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use futures::future::BoxFuture;
+use orion_xds::grpc_deps::Response;
+use tokio::sync::mpsc;
+
+use crate::clusters::health::checkers::tests::{deref, TestFixture};
+use crate::clusters::health::HealthStatus;
+
+use super::*;
+
+/// Channels to report every time a gRPC request is made, `requests`,
+/// and will respond with the items in `responses`.
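+/// No real connections are opened: `MockGrpcChannel` records each request and
+/// answers it with the next queued response.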
+struct GrpcActionTrace { + requests: mpsc::UnboundedSender, + responses: mpsc::UnboundedReceiver>, +} + +#[derive(Clone)] +struct MockGrpcChannel(Arc>); + +impl MockGrpcChannel { + pub fn new( + requests: mpsc::UnboundedSender, + responses: mpsc::UnboundedReceiver>, + ) -> Self { + MockGrpcChannel(Arc::new(Mutex::new(GrpcActionTrace { requests, responses }))) + } +} + +impl GrpcHealthChannel for MockGrpcChannel { + fn check(&mut self, request: HealthCheckRequest) -> BoxFuture, TonicStatus>> { + let state = Arc::clone(&self.0); + Box::pin(async move { + let state = &mut state.lock().unwrap(); + // Log this request + state.requests.send(request).unwrap(); + // Return the predefined response, if any + let response = state.responses.try_recv().unwrap(); + Ok(response) + }) + } +} + +struct GrpcTestFixture { + inner: TestFixture>, +} + +#[allow(clippy::panic)] +impl GrpcTestFixture { + pub fn new(healthy_threshold: u16, unhealthy_threshold: u16) -> Self { + let protocol_config = GrpcHealthCheck::default(); + let channel_builder = + |_, request_sender, response_receiver| MockGrpcChannel::new(request_sender, response_receiver); + Self { inner: TestFixture::new(protocol_config, healthy_threshold, unhealthy_threshold, channel_builder) } + } + + pub fn start(&mut self) { + self.inner.start(|endpoint, cluster_config, protocol_config, channel, sender, dependencies| { + spawn_grpc_health_checker_impl(endpoint, cluster_config, protocol_config, channel, sender, dependencies) + }); + } + + pub fn enqueue_response(&self, status: ServingStatus) { + let mut response = HealthCheckResponse::default(); + response.set_status(status); + self.inner.enqueue_response(Response::new(response)); + } +} + +deref!(GrpcTestFixture => inner as TestFixture>); + +const HEALTHY_THRESHOLD: u16 = 5; +const UNHEALTHY_THRESHOLD: u16 = 10; + +#[tokio::test] +async fn success() { + let mut test = GrpcTestFixture::new(HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD); + test.enqueue_response(ServingStatus::Serving); + test.start(); + let _req = test.request_expected(Duration::from_millis(100)).await; + let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await; + test.stop().await; +} + +#[tokio::test] +async fn failure() { + for failure_code in [ServingStatus::NotServing, ServingStatus::ServiceUnknown, ServingStatus::Unknown] { + let mut test = GrpcTestFixture::new(HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD); + test.enqueue_response(failure_code); + test.start(); + let _req = test.request_expected(Duration::from_millis(100)).await; + let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await; + test.stop().await; + } +} + +#[tokio::test] +async fn transition_to_healthy() { + let mut test = GrpcTestFixture::new(HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD); + + // Start with a failure, then transition to healthy + test.enqueue_response(ServingStatus::NotServing); + for _ in 0..HEALTHY_THRESHOLD { + test.enqueue_response(ServingStatus::Serving); + test.tick(); // allow the checker to advance one interval + } + test.start(); + let _req = test.request_expected(Duration::from_millis(100)).await; + let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await; + for _ in 0..HEALTHY_THRESHOLD { + let _req = test.request_expected(Duration::from_millis(100)).await; + } + let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await; + test.stop().await; +} + +#[tokio::test] +async fn 
transition_to_unhealthy() {
+    let mut test = GrpcTestFixture::new(HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+    // Start with a success, then transition to unhealthy
+    test.enqueue_response(ServingStatus::Serving);
+    for _ in 0..UNHEALTHY_THRESHOLD {
+        test.enqueue_response(ServingStatus::NotServing);
+        test.tick(); // allow the checker to advance one interval
+    }
+    test.start();
+    let _req = test.request_expected(Duration::from_millis(100)).await;
+    let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await;
+    for _ in 0..UNHEALTHY_THRESHOLD {
+        let _req = test.request_expected(Duration::from_millis(100)).await;
+    }
+    let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+    test.stop().await;
+}
diff --git a/orion-lib/src/clusters/health/checkers/http/mod.rs b/orion-lib/src/clusters/health/checkers/http/mod.rs
new file mode 100644
index 00000000..ad7bfd27
--- /dev/null
+++ b/orion-lib/src/clusters/health/checkers/http/mod.rs
@@ -0,0 +1,184 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+#[cfg(test)]
+mod tests;
+
+use std::ops::Range;
+use std::sync::Arc;
+
+use http::uri::{Authority, PathAndQuery, Scheme};
+use http::{Response, Version};
+use orion_configuration::config::cluster::health_check::{ClusterHealthCheck, Codec, HttpHealthCheck};
+use tokio::sync::{mpsc, Notify};
+use tokio::task::JoinHandle;
+
+use super::checker::{IntervalWaiter, ProtocolChecker, WaitInterval};
+// use crate::clusters::cluster::HyperService;
+use crate::clusters::health::checkers::checker::HealthCheckerLoop;
+use crate::clusters::health::counter::HealthStatusCounter;
+use crate::clusters::health::{EndpointHealthUpdate, EndpointId, HealthStatus};
+use crate::listeners::http_connection_manager::RequestHandler;
+use crate::transport::request_context::RequestWithContext;
+use crate::transport::HttpChannel;
+use crate::{Error, HttpBody};
+
+#[derive(Debug, thiserror::Error)]
+#[error("invalid HTTP status range")]
+pub struct InvalidHttpStatusRange;
+
+/// Spawns an HTTP health checker and returns its handle. Must be called from a Tokio runtime context.
+pub fn try_spawn_http_health_checker(
+    endpoint: EndpointId,
+    cluster_config: ClusterHealthCheck,
+    protocol_config: HttpHealthCheck,
+    channel: HttpChannel,
+    sender: mpsc::Sender<EndpointHealthUpdate>,
+    stop_signal: Arc<Notify>,
+) -> Result<JoinHandle<Result<(), Error>>, Error> {
+    // Stateless waiter that simply sleeps out the configured intervals between checks.
+    let interval_waiter = IntervalWaiter;
+    try_spawn_http_health_checker_impl(
+        endpoint,
+        cluster_config,
+        protocol_config,
+        channel.is_https(),
+        sender,
+        stop_signal,
+        (channel, interval_waiter),
+    )
+}
+
+/// Actual implementation of `try_spawn_http_health_checker()`, with `dependencies` containing the
+/// injected HTTP stack builder and interval waiter.
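+///
+/// A minimal wiring sketch (hypothetical caller; `endpoint`, `cluster_config`,
+/// `http_config` and `http_channel` are assumed to exist):
+///
+/// ```ignore
+/// let stop_signal = Arc::new(Notify::new());
+/// let (sender, mut updates) = mpsc::channel(100);
+/// let handle = try_spawn_http_health_checker_impl(
+///     endpoint,
+///     cluster_config,
+///     http_config,
+///     false, // plain HTTP rather than HTTPS
+///     sender,
+///     Arc::clone(&stop_signal),
+///     (http_channel, IntervalWaiter),
+/// )?;
+/// // ... consume EndpointHealthUpdate values from `updates` ...
+/// stop_signal.notify_waiters(); // ask the checker to stop
+/// let _ = handle.await;
+/// ```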
+fn try_spawn_http_health_checker_impl( + endpoint: EndpointId, + cluster_config: ClusterHealthCheck, + protocol_config: HttpHealthCheck, + is_https: bool, + sender: mpsc::Sender, + stop_signal: Arc, + dependencies: (H, W), +) -> Result>, Error> +where + W: WaitInterval + Send + 'static, + H: Send + 'static, + for<'a> &'a H: RequestHandler>, +{ + tracing::debug!( + "Starting HTTP health checks of endpoint {:?} in cluster {:?}", + endpoint.endpoint, + endpoint.cluster + ); + + let (http_client, interval_waiter) = dependencies; + + let http_version = match protocol_config.http_version { + Codec::Http1 => Version::HTTP_11, + Codec::Http2 => Version::HTTP_2, + }; + + let scheme = if is_https { Scheme::HTTPS } else { Scheme::HTTP }; + + let host = protocol_config.host(&endpoint.cluster)?; + let host_name = host.to_string(); + let uri = build_uri(scheme, host, protocol_config.path.unwrap_or(PathAndQuery::from_static("")))?; + + let checker = HttpChecker { + expected_statuses: protocol_config.expected_statuses, + retriable_statuses: protocol_config.retriable_statuses, + http_version, + method: protocol_config.method, + host: host_name, + uri, + client: http_client, + }; + + let check_loop = HealthCheckerLoop::new(endpoint, cluster_config, sender, stop_signal, interval_waiter, checker); + + Ok(check_loop.spawn()) +} + +struct HttpChecker { + expected_statuses: Vec>, + retriable_statuses: Vec>, + http_version: http::Version, + method: http::Method, + host: String, + uri: http::Uri, + client: H, +} + +impl ProtocolChecker for HttpChecker +where + H: Send, + for<'a> &'a H: RequestHandler>, +{ + type Response = Response; + + async fn check(&mut self) -> Result { + let request = create_request(self.http_version, &self.method, &self.host, &self.uri)?; + self.client.to_response(request).await + } + + fn process_response( + &self, + endpoint: &EndpointId, + counter: &mut HealthStatusCounter, + response: &Self::Response, + ) -> Option { + let status_code = response.status(); + let default_expectation = self.expected_statuses.is_empty() && status_code == http::StatusCode::OK; + + if default_expectation || status_in_ranges(&self.expected_statuses, status_code) { + // Expected statuses count as a success, and take priority + tracing::debug!("Response for cluster {:?}: success {}", endpoint.endpoint, status_code); + counter.add_success() + } else if status_in_ranges(&self.retriable_statuses, status_code) { + // Retriable statuses count as a failure, but don't change the status immediately + tracing::debug!("Response for cluster {:?}: retriable failure {}", endpoint.endpoint, status_code); + counter.add_failure() + } else { + // Unexpected statuses immediately cause the endpoint to be unhealthy + tracing::debug!("Response for cluster {:?}: failure {}", endpoint.endpoint, status_code); + counter.add_failure_ignore_threshold() + } + } +} + +fn status_in_ranges(ranges: &[Range], status: http::StatusCode) -> bool { + ranges.iter().any(|range| range.contains(&status.as_u16())) +} + +fn build_uri(scheme: Scheme, host: Authority, path: PathAndQuery) -> Result { + hyper::Uri::builder().scheme(scheme).authority(host).path_and_query(path).build() +} + +fn create_request( + http_version: http::Version, + method: &http::Method, + host: &str, + uri: &http::Uri, +) -> Result, http::Error> { + let req = http::Request::builder().version(http_version).method(method).uri(uri); + let req = if http_version < http::Version::HTTP_2 { req.header("Host", host) } else { req }; + let req = req.header("User-Agent", 
"orion/health-checks"); + Ok(RequestWithContext::new(req.body(HttpBody::default())?)) +} diff --git a/orion-lib/src/clusters/health/checkers/http/tests.rs b/orion-lib/src/clusters/health/checkers/http/tests.rs new file mode 100644 index 00000000..5b5afed0 --- /dev/null +++ b/orion-lib/src/clusters/health/checkers/http/tests.rs @@ -0,0 +1,268 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +/* + * The test fixture constructs a mock HTTP stack, which inspects the requests and + * reports them to the test cases. A queue of responses is used to reply to all + * the requests that arrive from the health checker. No actual HTTP or TCP + * connections are done. It's a bit more code, but worth in the long run. + */ + +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use http::{Response, Version}; +use tokio::sync::mpsc; + +use crate::clusters::health::checkers::tests::{deref, TestFixture}; +use crate::clusters::health::HealthStatus; +use crate::{PolyBody, Result}; + +use super::*; + +/// Channels to report every time an HTTP request is made, `requests`, +/// and will respond with the items in `responses`. 
+struct HttpActionTrace { + requests: mpsc::UnboundedSender>, + responses: mpsc::UnboundedReceiver>, +} + +#[derive(Clone)] +struct MockHttpStack(Arc>); + +impl MockHttpStack { + pub fn new( + requests: mpsc::UnboundedSender>, + responses: mpsc::UnboundedReceiver>, + ) -> Self { + MockHttpStack(Arc::new(Mutex::new(HttpActionTrace { requests, responses }))) + } +} + +impl<'a> RequestHandler> for &MockHttpStack { + async fn to_response(self, request: RequestWithContext<'a, HttpBody>) -> Result> { + let state = &mut self.0.lock().unwrap(); + // Log this request + state.requests.send(request.req).unwrap(); + // Return the predefined response, if any + let response = state.responses.try_recv()?; + Ok(response) + } +} + +struct HttpTestFixture { + inner: TestFixture, http::Response>, +} + +#[allow(clippy::panic)] +impl HttpTestFixture { + pub fn new(http_version: http::Version, healthy_threshold: u16, unhealthy_threshold: u16) -> Self { + let protocol_config = HttpHealthCheck { + http_version: match http_version { + http::Version::HTTP_11 => Codec::Http1, + http::Version::HTTP_2 => Codec::Http2, + _ => panic!("Unsupported HTTP version"), + }, + ..Default::default() + }; + let channel_builder = + |_, request_sender, response_receiver| MockHttpStack::new(request_sender, response_receiver); + Self { inner: TestFixture::new(protocol_config, healthy_threshold, unhealthy_threshold, channel_builder) } + } + + pub fn enqueue_response(&self, code: http::StatusCode) { + let response = hyper::Response::builder() + .version(match self.inner.protocol_config.http_version { + Codec::Http1 => Version::HTTP_11, + Codec::Http2 => Version::HTTP_2, + }) + .status(code) + .body(HttpBody::default()) + .unwrap(); + self.inner.enqueue_response(response); + } + + pub async fn request_expected(&mut self, timeout_value: Duration) -> http::Request { + let req = self.inner.request_expected(timeout_value).await; + + assert_eq!( + req.version(), + match self.inner.protocol_config.http_version { + Codec::Http1 => Version::HTTP_11, + Codec::Http2 => Version::HTTP_2, + } + ); + assert_eq!(req.method(), self.inner.protocol_config.method, "Wrong HTTP method"); + assert_eq!(req.uri().host(), Some(self.inner.endpoint.cluster.as_str()), "Wrong HTTP host"); + + req + } + + pub fn start(&mut self) { + self.inner.start(|endpoint, cluster_config, protocol_config, channel, sender, dependencies| { + try_spawn_http_health_checker_impl( + endpoint, + cluster_config, + protocol_config, + false, + channel, + sender, + dependencies, + ) + .unwrap() + }); + } +} + +deref!(HttpTestFixture => inner as TestFixture, http::Response>); + +const HEALTHY_THRESHOLD: u16 = 5; +const UNHEALTHY_THRESHOLD: u16 = 10; + +#[tokio::test] +async fn http1() { + let mut test = HttpTestFixture::new(http::Version::HTTP_11, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD); + test.enqueue_response(http::StatusCode::OK); + test.start(); + let _req = test.request_expected(Duration::from_millis(100)).await; + let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await; + test.stop().await; +} + +#[tokio::test] +async fn http2() { + let mut test = HttpTestFixture::new(http::Version::HTTP_2, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD); + test.enqueue_response(http::StatusCode::OK); + test.start(); + let _req = test.request_expected(Duration::from_millis(100)).await; + let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await; + test.stop().await; +} + +#[tokio::test] +async fn failure() { + let mut test = 
HttpTestFixture::new(http::Version::HTTP_11, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+    test.enqueue_response(http::StatusCode::NOT_FOUND);
+    test.start();
+    let _req = test.request_expected(Duration::from_millis(100)).await;
+    let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+    test.stop().await;
+}
+
+#[tokio::test]
+async fn transition_to_healthy() {
+    let mut test = HttpTestFixture::new(http::Version::HTTP_11, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+    // Start with a failure, then transition to healthy
+    test.enqueue_response(http::StatusCode::NOT_FOUND);
+    for _ in 0..HEALTHY_THRESHOLD {
+        test.enqueue_response(http::StatusCode::OK);
+        test.tick(); // allow the checker to advance one interval
+    }
+    test.start();
+    let _req = test.request_expected(Duration::from_millis(100)).await;
+    let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+    for _ in 0..HEALTHY_THRESHOLD {
+        let _req = test.request_expected(Duration::from_millis(100)).await;
+    }
+    let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await;
+    test.stop().await;
+}
+
+#[tokio::test]
+async fn transition_to_unhealthy() {
+    let mut test = HttpTestFixture::new(http::Version::HTTP_11, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+    // Start with a success, then transition to unhealthy
+    test.enqueue_response(http::StatusCode::OK);
+    for _ in 0..UNHEALTHY_THRESHOLD {
+        test.enqueue_response(http::StatusCode::NOT_FOUND);
+        test.tick(); // allow the checker to advance one interval
+    }
+    test.start();
+    let _req = test.request_expected(Duration::from_millis(100)).await;
+    let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await;
+    for _ in 0..UNHEALTHY_THRESHOLD {
+        let _req = test.request_expected(Duration::from_millis(100)).await;
+    }
+    let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+    test.stop().await;
+}
+
+#[tokio::test]
+#[allow(clippy::single_range_in_vec_init)]
+async fn expected_and_retriable_statuses() {
+    use http::StatusCode;
+
+    // The initial expected status (2XX) should cause a healthy update,
+    // then the retriable status (3XX) should cause a transition if the unhealthy threshold is surpassed.
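+    // Concretely: the single queued 2XX reply marks the endpoint healthy, and the
+    // UNHEALTHY_THRESHOLD consecutive 3XX replies that follow count as retriable
+    // failures until the threshold is crossed and the endpoint becomes unhealthy.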
+    for (expected_status, retriable_status) in [
+        (StatusCode::OK, StatusCode::MULTIPLE_CHOICES),
+        (StatusCode::CREATED, StatusCode::MOVED_PERMANENTLY),
+        (StatusCode::ACCEPTED, StatusCode::FOUND),
+        (StatusCode::NON_AUTHORITATIVE_INFORMATION, StatusCode::SEE_OTHER),
+        (StatusCode::NO_CONTENT, StatusCode::NOT_MODIFIED),
+        (StatusCode::RESET_CONTENT, StatusCode::USE_PROXY),
+        (StatusCode::PARTIAL_CONTENT, StatusCode::TEMPORARY_REDIRECT),
+        (StatusCode::MULTI_STATUS, StatusCode::PERMANENT_REDIRECT),
+    ] {
+        let mut test = HttpTestFixture::new(http::Version::HTTP_11, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+        // This is what is being tested:
+        test.protocol_config.expected_statuses = vec![200..300];
+        test.protocol_config.retriable_statuses = vec![300..400];
+
+        test.enqueue_response(expected_status);
+        for _ in 0..UNHEALTHY_THRESHOLD {
+            test.enqueue_response(retriable_status);
+            test.tick(); // allow the checker to advance one interval
+        }
+
+        test.start();
+        let _req = test.request_expected(Duration::from_millis(100)).await;
+        let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await;
+        for _ in 0..UNHEALTHY_THRESHOLD {
+            let _req = test.request_expected(Duration::from_millis(100)).await;
+        }
+        let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+
+        test.stop().await;
+    }
+}
+
+#[tokio::test]
+#[allow(clippy::single_range_in_vec_init)]
+async fn unexpected_status() {
+    let mut test = HttpTestFixture::new(http::Version::HTTP_11, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+    // Expect any 2XX except 200 OK
+    test.protocol_config.expected_statuses = vec![201..300];
+
+    test.enqueue_response(http::StatusCode::ACCEPTED); // 202 expected
+    test.enqueue_response(http::StatusCode::OK); // 200 unexpected
+    test.tick();
+
+    test.start();
+    let _req = test.request_expected(Duration::from_millis(100)).await;
+    let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await;
+    let _req = test.request_expected(Duration::from_millis(100)).await;
+    let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+    test.stop().await;
+}
diff --git a/orion-lib/src/clusters/health/checkers/mod.rs b/orion-lib/src/clusters/health/checkers/mod.rs
new file mode 100644
index 00000000..9661c42e
--- /dev/null
+++ b/orion-lib/src/clusters/health/checkers/mod.rs
@@ -0,0 +1,135 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +// + +mod checker; +mod grpc; +mod http; +mod tcp; +#[cfg(test)] +mod tests; + +use std::sync::Arc; + +use orion_configuration::config::cluster::health_check::{ + ClusterHealthCheck, GrpcHealthCheck, HttpHealthCheck, TcpHealthCheck, +}; +use tokio::sync::{mpsc, Notify}; +use tokio::task::JoinHandle; + +use self::grpc::spawn_grpc_health_checker; +use self::http::try_spawn_http_health_checker; +use self::tcp::spawn_tcp_health_checker; +use super::{EndpointHealthUpdate, EndpointId, HealthStatus}; +use crate::clusters::GrpcService; +use crate::transport::{HttpChannel, TcpChannel}; +use crate::Error; + +#[derive(Debug)] +pub struct EndpointHealthChecker { + health_check_task: Option>>, + stop_signal: Arc, +} + +impl EndpointHealthChecker { + pub fn try_new_http( + endpoint: EndpointId, + cluster_config: ClusterHealthCheck, + protocol_config: HttpHealthCheck, + channel: HttpChannel, + sender: mpsc::Sender, + ) -> Result { + let stop_signal = Arc::new(Notify::new()); + Ok(EndpointHealthChecker { + health_check_task: { + Some(try_spawn_http_health_checker( + endpoint, + cluster_config, + protocol_config, + channel, + sender, + Arc::clone(&stop_signal), + )?) + }, + stop_signal, + }) + } + + pub fn new_tcp( + endpoint: EndpointId, + cluster_config: ClusterHealthCheck, + protocol_config: TcpHealthCheck, + channel: TcpChannel, + sender: mpsc::Sender, + ) -> Self { + let stop_signal = Arc::new(Notify::new()); + EndpointHealthChecker { + health_check_task: { + Some(spawn_tcp_health_checker( + endpoint, + cluster_config, + protocol_config, + channel, + sender, + Arc::clone(&stop_signal), + )) + }, + stop_signal, + } + } + + pub fn new_grpc( + endpoint: EndpointId, + cluster_config: ClusterHealthCheck, + protocol_config: GrpcHealthCheck, + channel: GrpcService, + sender: mpsc::Sender, + ) -> Self { + let stop_signal = Arc::new(Notify::new()); + EndpointHealthChecker { + health_check_task: { + Some(spawn_grpc_health_checker( + endpoint, + cluster_config, + protocol_config, + channel, + sender, + Arc::clone(&stop_signal), + )) + }, + stop_signal, + } + } + + pub async fn stop(mut self) { + self.stop_signal.notify_waiters(); + if let Some(handle) = self.health_check_task.take() { + match handle.await { + Ok(Err(err)) => tracing::warn!("Health checker failed: {}", err), + Err(join_err) => tracing::warn!("Error joining health checker task: {}", join_err), + _ => (), + } + } + } +} + +enum CurrentHealthStatus { + Unchanged(Option), + Edge(HealthStatus), +} diff --git a/orion-lib/src/clusters/health/checkers/tcp/mod.rs b/orion-lib/src/clusters/health/checkers/tcp/mod.rs new file mode 100644 index 00000000..5a3df730 --- /dev/null +++ b/orion-lib/src/clusters/health/checkers/tcp/mod.rs @@ -0,0 +1,244 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+#[cfg(test)]
+mod tests;
+
+use std::sync::Arc;
+
+use futures::future::BoxFuture;
+use futures::{FutureExt, TryFutureExt};
+use orion_configuration::config::cluster::health_check::{ClusterHealthCheck, TcpHealthCheck};
+use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use tokio::net::TcpStream;
+use tokio::sync::{mpsc, Notify};
+use tokio::task::JoinHandle;
+
+use crate::clusters::health::checkers::checker::HealthCheckerLoop;
+use crate::clusters::health::counter::HealthStatusCounter;
+use crate::clusters::health::{EndpointId, HealthStatus};
+use crate::transport::TcpChannel;
+use crate::{EndpointHealthUpdate, Error};
+
+use super::checker::{IntervalWaiter, ProtocolChecker, WaitInterval};
+
+const DEFAULT_MAX_PAYLOAD_BUFFER_SIZE: usize = 0x10_0000; // 1 MB
+
+pub fn spawn_tcp_health_checker(
+    endpoint: EndpointId,
+    cluster_config: ClusterHealthCheck,
+    protocol_config: TcpHealthCheck,
+    channel: TcpChannel,
+    sender: mpsc::Sender<EndpointHealthUpdate>,
+    stop_signal: Arc<Notify>,
+) -> JoinHandle<Result<(), Error>> {
+    let interval_waiter = IntervalWaiter;
+    spawn_tcp_health_checker_impl::<_, _, DEFAULT_MAX_PAYLOAD_BUFFER_SIZE>(
+        endpoint,
+        cluster_config,
+        protocol_config,
+        sender,
+        stop_signal,
+        (channel, interval_waiter),
+    )
+}
+
+trait TcpClient
+where
+    Self::Stream: AsyncRead + AsyncWrite,
+{
+    type Stream;
+    fn connect(&self) -> BoxFuture<'static, std::result::Result<Self::Stream, Error>>;
+}
+
+impl TcpClient for TcpChannel {
+    type Stream = TcpStream;
+
+    fn connect(&self) -> BoxFuture<'static, std::result::Result<Self::Stream, Error>> {
+        self.connect().map_err(Error::from).boxed()
+    }
+}
+
+fn spawn_tcp_health_checker_impl<T, W, const MAX_PAYLOAD_BUFFER_SIZE: usize>(
+    endpoint: EndpointId,
+    cluster_config: ClusterHealthCheck,
+    protocol_config: TcpHealthCheck,
+    sender: mpsc::Sender<EndpointHealthUpdate>,
+    stop_signal: Arc<Notify>,
+    dependencies: (T, W),
+) -> JoinHandle<Result<(), Error>>
+where
+    W: WaitInterval + Send + 'static,
+    T: TcpClient + Send + Sync + 'static,
+    T::Stream: Unpin + Send + 'static,
+{
+    tracing::debug!(
+        "Starting TCP health checks of endpoint {:?} in cluster {:?}",
+        endpoint.endpoint,
+        endpoint.cluster
+    );
+
+    let (tcp_client, interval_waiter) = dependencies;
+
+    let tcp_checker = TcpChecker::<_, MAX_PAYLOAD_BUFFER_SIZE> { tcp_client, config: protocol_config };
+    let check_loop =
+        HealthCheckerLoop::new(endpoint, cluster_config, sender, stop_signal, interval_waiter, tcp_checker);
+
+    check_loop.spawn()
+}
+
+struct TcpChecker<T, const MAX_PAYLOAD_BUFFER_SIZE: usize> {
+    tcp_client: T,
+    config: TcpHealthCheck,
+}
+
+impl<T, const MAX_PAYLOAD_BUFFER_SIZE: usize> ProtocolChecker for TcpChecker<T, MAX_PAYLOAD_BUFFER_SIZE>
+where
+    T: TcpClient + Send,
+    T::Stream: AsyncRead + AsyncWrite + Unpin + Send,
+{
+    type Response = ();
+
+    async fn check(&mut self) -> Result<Self::Response, Error> {
+        let mut stream = self.tcp_client.connect().await?;
+
+        if let Some(send_payload) = &self.config.send {
+            stream.write_all(send_payload).await?;
+        }
+
+        if !self.config.receive.is_empty() {
+            let mut matcher = PayloadMatcher::<_, MAX_PAYLOAD_BUFFER_SIZE>::new(&mut stream, &self.config.receive);
+            return matcher.try_match().await;
+        }
+
+        Ok(())
+    }
+
+    fn process_response(
+        &self,
+        _endpoint: &EndpointId,
+        counter: &mut HealthStatusCounter,
+        _response: &Self::Response,
+    ) -> Option<HealthStatus> {
+        counter.add_success()
+    }
+}
+
+// See the description of the pattern matcher:
+// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/health_check.proto#envoy-v3-api-msg-config-core-v3-healthcheck-tcphealthcheck
+struct PayloadMatcher<'a, T, const MAX_PAYLOAD_BUFFER_SIZE: usize>
+where
+    T: AsyncRead + Unpin,
+{
+    buffer: Vec<u8>,
+    stream: &'a mut T,
+    payloads: &'a [Vec<u8>],
+    payload_size: usize,
+}
+
+impl<'a, T, const MAX_PAYLOAD_BUFFER_SIZE:
usize> PayloadMatcher<'a, T, MAX_PAYLOAD_BUFFER_SIZE> +where + T: AsyncRead + Unpin, +{ + fn new(stream: &'a mut T, payloads: &'a [Vec]) -> Self { + let payload_size = payloads.iter().map(Vec::len).sum(); + Self { buffer: Vec::new(), stream, payloads, payload_size } + } + + async fn try_match(&'a mut self) -> Result<(), Error> { + let mut more_bytes = 1024_usize; + + if self.payload_size == 0 { + return Ok(()); + } + + // This algorithm just keeps reading data until the payload matches + // or there is a timeout. It could be improved by discarding the + // buffer if the head of the payload is not found, and only reading + // the remaining bytes if the payload partially matches. + // However, the complexity of that code doesn't seem justified given + // the small benefits, and would require extensive unit tests. + + self.recv_exact(self.payload_size).await?; + + loop { + if self.matches() { + return Ok(()); + } + self.recv_at_most(more_bytes).await?; + + // Let's be increasingly hungry for more data until we reach the maximum + more_bytes = more_bytes.saturating_mul(2); + } + } + + fn matches(&self) -> bool { + let mut index = 0; + + for payload in self.payloads { + if payload.is_empty() { + continue; + } + + let Some(buffer) = &self.buffer.get(index..) else { + tracing::error!("Unexpected out-of-bounds error when verifying the TCP payload in health checker"); + return false; + }; + + let Some(payload_index) = buffer.windows(payload.len()).position(|window| window == payload) else { + return false; + }; + + index += payload_index; + } + + true + } + + async fn recv_at_most(&mut self, bytes_to_read: usize) -> Result<(), Error> { + if self.buffer.len() >= MAX_PAYLOAD_BUFFER_SIZE || bytes_to_read == 0 { + return Err("payload buffer too big".into()); + } + + let prev_size = self.buffer.len(); + let new_size = MAX_PAYLOAD_BUFFER_SIZE.min(prev_size + bytes_to_read); + + self.buffer.resize(new_size, 0); + + let received = self.stream.read(&mut self.buffer[prev_size..]).await?; + + if received == 0 { + return Err("end of stream".into()); + } + + self.buffer.resize(prev_size + received, 0); + + Ok(()) + } + + async fn recv_exact(&mut self, bytes_to_read: usize) -> Result<(), Error> { + let prev_size = self.buffer.len(); + self.buffer.resize(prev_size + bytes_to_read, 0); + + self.stream.read_exact(&mut self.buffer[prev_size..]).await?; + + Ok(()) + } +} diff --git a/orion-lib/src/clusters/health/checkers/tcp/tests.rs b/orion-lib/src/clusters/health/checkers/tcp/tests.rs new file mode 100644 index 00000000..20090f7f --- /dev/null +++ b/orion-lib/src/clusters/health/checkers/tcp/tests.rs @@ -0,0 +1,332 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +/* + * The test fixture constructs a mock TCP stack, which inspects the requests and + * reports them to the test cases. 
A queue of responses is used to reply to all + * the requests that arrive from the health checker. No actual TCP + * connections are done. It's a bit more code, but worth in the long run. + */ + +use std::collections::VecDeque; +use std::sync::{Arc, Mutex}; +use std::task::Poll; +use std::time::Duration; + +use futures::future::BoxFuture; +use tokio::sync::mpsc; + +use crate::clusters::health::checkers::tests::{deref, TestFixture}; +use crate::clusters::health::HealthStatus; + +use super::*; + +/// Channels to report every time a TCP request is made, `requests`, +/// and will respond with the items in `responses`. +struct TcpActionTrace { + connections_allowed: bool, + connections: mpsc::UnboundedSender, + requests: mpsc::UnboundedSender>, + responses: mpsc::UnboundedReceiver>>, +} + +#[derive(Clone)] +struct MockTcpClient(Arc>); + +impl MockTcpClient { + pub fn new( + connections_allowed: bool, + connections: mpsc::UnboundedSender, + requests: mpsc::UnboundedSender>, + responses: mpsc::UnboundedReceiver>>, + ) -> Self { + MockTcpClient(Arc::new(Mutex::new(TcpActionTrace { connections_allowed, connections, requests, responses }))) + } +} + +#[derive(Clone)] +struct MockTcpStream { + actions: Arc>, + responses: VecDeque>, + buffer: Vec, +} + +impl MockTcpStream { + pub fn new(actions: Arc>) -> Self { + let responses = { + let state = &mut actions.lock().unwrap(); + if let Ok(responses) = state.responses.try_recv() { + responses.into() + } else { + VecDeque::new() + } + }; + MockTcpStream { actions, responses, buffer: Vec::new() } + } + + pub fn read_into_buffer(&mut self, buf: &mut tokio::io::ReadBuf<'_>) -> std::task::Poll> { + if self.buffer.is_empty() { + let data = { + let Some(data) = self.responses.pop_front() else { + return Poll::Ready(Ok(())); + }; + data + }; + self.buffer = data; + } + + self.read_pending(buf); + Poll::Ready(Ok(())) + } + + fn read_pending(&mut self, buf: &mut tokio::io::ReadBuf<'_>) { + let consumed = buf.remaining().min(self.buffer.len()); + buf.put_slice(&self.buffer[..consumed]); + self.buffer.drain(0..consumed); + } +} + +impl AsyncRead for MockTcpStream { + fn poll_read( + mut self: std::pin::Pin<&mut Self>, + _cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + self.read_into_buffer(buf) + } +} + +impl AsyncWrite for MockTcpStream { + fn poll_write( + self: std::pin::Pin<&mut Self>, + _cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> Poll> { + let state = &mut self.actions.lock().unwrap(); + state.requests.send(buf.into()).unwrap(); + Poll::Ready(Ok(buf.len())) + } + + fn poll_flush( + self: std::pin::Pin<&mut Self>, + _cx: &mut std::task::Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown( + self: std::pin::Pin<&mut Self>, + _cx: &mut std::task::Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } +} + +impl TcpClient for MockTcpClient { + type Stream = MockTcpStream; + + fn connect(&self) -> BoxFuture<'static, std::result::Result> { + { + let state = self.0.lock().unwrap(); + state.connections.send(state.connections_allowed).unwrap(); + if !state.connections_allowed { + return Box::pin(futures::future::err("connections not allowed in this test".into())); + } + } + Box::pin(futures::future::ready(Ok(MockTcpStream::new(Arc::clone(&self.0))))) + } +} + +struct TcpTestFixture { + inner: TestFixture, Vec>>, +} + +impl TcpTestFixture { + const MAX_PAYLOAD_BUFFER_SIZE: usize = 1024; + + pub fn new(allow_connections: bool, healthy_threshold: u16, unhealthy_threshold: u16) -> Self { + let 
protocol_config = TcpHealthCheck::default(); + let channel_builder = |connections_sender, request_sender, response_receiver| { + MockTcpClient::new(allow_connections, connections_sender, request_sender, response_receiver) + }; + Self { inner: TestFixture::new(protocol_config, healthy_threshold, unhealthy_threshold, channel_builder) } + } + + pub fn start(&mut self) { + self.inner.start(|endpoint, cluster_config, protocol_config, channel, sender, dependencies| { + spawn_tcp_health_checker_impl::<_, _, { Self::MAX_PAYLOAD_BUFFER_SIZE }>( + endpoint, + cluster_config, + protocol_config, + channel, + sender, + dependencies, + ) + }); + } + + pub fn set_send_text_payload(&mut self, text: &str) { + self.inner.protocol_config.send = Some(text.as_bytes().to_vec()); + } + + pub fn add_receive_text_payload(&mut self, text: &str) { + self.inner.protocol_config.receive.push(text.as_bytes().to_vec()); + } + + pub fn enqueue_response(&self, payloads: &[&[u8]]) { + self.inner.enqueue_response(payloads.iter().copied().map(Vec::from).collect()); + } +} + +deref!(TcpTestFixture => inner as TestFixture, Vec>>); + +const HEALTHY_THRESHOLD: u16 = 5; +const UNHEALTHY_THRESHOLD: u16 = 10; + +#[tokio::test] +async fn connect() { + let mut test = TcpTestFixture::new(true, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD); + test.start(); + assert!(test.connection_expected(Duration::from_millis(100)).await); + let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await; + test.stop().await; +} + +#[tokio::test] +async fn failure() { + let mut test = TcpTestFixture::new(false, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD); + test.start(); + assert!(!test.connection_expected(Duration::from_millis(100)).await); + let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await; + test.stop().await; +} + +#[tokio::test] +async fn matching() { + fn test_with_payloads(payloads: &[&[u8]]) -> TcpTestFixture { + let mut test = TcpTestFixture::new(true, 1, 1); + + test.set_send_text_payload("Charles Dickens"); + test.add_receive_text_payload("It was the best of times, "); + test.add_receive_text_payload("it was the worst of times"); + + test.enqueue_response(payloads); + test.tick(); + + test + } + + const CHECKS: &[(&[&[u8]], HealthStatus)] = &[ + ( + &[b"foo", b"It was the best of times, ", b"bar", b"it was the worst of times", b"zarb"], + HealthStatus::Healthy, + ), + (&[b"Call me Ishmael"], HealthStatus::Unhealthy), + ( + &[ + b"it was the worst of times", + b"It was the best of times, ", + b"It was the best of times, ", + b"bar", + b"it was the worst of times", + b"zarb", + ], + HealthStatus::Healthy, + ), + ( + &[ + b"it was the worst of times", + b"it was the worst of times", + b"It was the best of times, ", + b"It was the best of times, ", + ], + HealthStatus::Unhealthy, + ), + ]; + + let very_long_response = vec![b' '; TcpTestFixture::MAX_PAYLOAD_BUFFER_SIZE]; + let very_long_payload = + vec![b"It was the best of times, ", very_long_response.as_slice(), b"it was the worst of times"]; + + for (payloads, status) in CHECKS.iter().chain(&[(very_long_payload.as_slice(), HealthStatus::Unhealthy)]) { + let mut test = test_with_payloads(payloads); + + test.start(); + + assert!(test.connection_expected(Duration::from_millis(100)).await); + assert_eq!(test.request_expected(Duration::from_millis(100)).await, b"Charles Dickens"); + let _update = test.health_update_expected(*status, Duration::from_millis(100)).await; + + test.stop().await; + } +} + +#[tokio::test] +async 
fn transition_to_healthy() {
+    let mut test = TcpTestFixture::new(true, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+    test.set_send_text_payload("Charles Dickens");
+    test.add_receive_text_payload("It was the best of times, ");
+    test.add_receive_text_payload("it was the worst of times");
+
+    // Start with a failure, then transition to healthy
+    test.enqueue_response(&[b"Call me Ishmael"]);
+
+    for _ in 0..HEALTHY_THRESHOLD {
+        test.enqueue_response(&[b"foo", b"It was the best of times, ", b"bar", b"it was the worst of times", b"zarb"]);
+        test.tick(); // allow the checker to advance one interval
+    }
+    test.start();
+
+    assert!(test.connection_expected(Duration::from_millis(100)).await);
+    assert_eq!(test.request_expected(Duration::from_millis(100)).await, b"Charles Dickens");
+    let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+    for _ in 0..HEALTHY_THRESHOLD {
+        assert!(test.connection_expected(Duration::from_millis(100)).await);
+        assert_eq!(test.request_expected(Duration::from_millis(100)).await, b"Charles Dickens");
+    }
+    let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await;
+    test.stop().await;
+}
+
+#[tokio::test]
+async fn transition_to_unhealthy() {
+    let mut test = TcpTestFixture::new(true, HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+    test.set_send_text_payload("Charles Dickens");
+    test.add_receive_text_payload("It was the best of times, it was the worst of times");
+
+    // Start with a success, then transition to unhealthy
+    test.enqueue_response(&["It was the best of times, it was the worst of times".as_bytes()]);
+    for _ in 0..UNHEALTHY_THRESHOLD {
+        test.enqueue_response(&[b"Call me Ishmael"]);
+        test.tick(); // allow the checker to advance one interval
+    }
+    test.start();
+    assert!(test.connection_expected(Duration::from_millis(100)).await);
+    let _req = test.request_expected(Duration::from_millis(100)).await;
+    let _update = test.health_update_expected(HealthStatus::Healthy, Duration::from_millis(100)).await;
+    for _ in 0..UNHEALTHY_THRESHOLD {
+        assert!(test.connection_expected(Duration::from_millis(100)).await);
+        assert_eq!(test.request_expected(Duration::from_millis(100)).await, b"Charles Dickens");
+    }
+    let _update = test.health_update_expected(HealthStatus::Unhealthy, Duration::from_millis(100)).await;
+    test.stop().await;
+}
diff --git a/orion-lib/src/clusters/health/checkers/tests.rs b/orion-lib/src/clusters/health/checkers/tests.rs
new file mode 100644
index 00000000..35b09311
--- /dev/null
+++ b/orion-lib/src/clusters/health/checkers/tests.rs
@@ -0,0 +1,229 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
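+//
+// Shared test fixture used by the gRPC, HTTP and TCP health checker tests.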
+// +// + +use std::{sync::Arc, time::Duration}; + +use http::uri::Authority; +use orion_configuration::config::cluster::{health_check::ClusterHealthCheck, HealthStatus}; +use pingora_timeout::fast_timeout::fast_timeout; +use tokio::{ + sync::{ + mpsc::{self, Sender, UnboundedReceiver, UnboundedSender}, + Notify, + }, + task::JoinHandle, +}; + +use super::{checker::WaitInterval, CurrentHealthStatus}; +use crate::clusters::health::EndpointId; +use crate::EndpointHealthUpdate; +use crate::Error; + +macro_rules! deref { + ($subclass:ty => $field:ident as $base:ty) => { + impl ::std::ops::Deref for $subclass { + type Target = $base; + + fn deref(&self) -> &Self::Target { + &self.$field + } + } + + impl ::std::ops::DerefMut for $subclass { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.$field + } + } + }; +} +pub(crate) use deref; + +#[derive(Clone)] +pub struct MockIntervalWaiter(Arc>>); + +impl MockIntervalWaiter { + pub fn new() -> (Self, mpsc::UnboundedSender<()>) { + let (sender, receiver) = mpsc::unbounded_channel(); + (MockIntervalWaiter(Arc::new(tokio::sync::Mutex::new(receiver))), sender) + } +} + +impl WaitInterval for MockIntervalWaiter { + async fn wait_interval_was_cancelled( + &self, + _config: &ClusterHealthCheck, + _health_status: CurrentHealthStatus, + stop_signal: &Notify, + ) -> bool { + let intervals = Arc::clone(&self.0); + let mut interval_receiver = intervals.lock().await; + tokio::select! { + () = stop_signal.notified() => true, + _ = interval_receiver.recv() => false, + } + } +} + +pub enum CheckerTask { + Ready((MockChannel, MockIntervalWaiter, mpsc::Sender)), + Started(JoinHandle>), + Finished, +} + +pub struct TestFixture { + pub endpoint: EndpointId, + pub cluster_config: ClusterHealthCheck, + pub protocol_config: ProtocolConfig, + pub stop_signal: Arc, + pub health_event_receiver: mpsc::Receiver, + pub connections_receiver: mpsc::UnboundedReceiver, + pub request_receiver: mpsc::UnboundedReceiver, + pub response_sender: mpsc::UnboundedSender, + pub interval_sender: mpsc::UnboundedSender<()>, + pub checker_task: CheckerTask, +} + +#[allow(clippy::panic)] +impl TestFixture +where + ProtocolConfig: Clone, +{ + pub fn new( + protocol_config: ProtocolConfig, + healthy_threshold: u16, + unhealthy_threshold: u16, + channel_builder: F, + ) -> Self + where + F: FnOnce(UnboundedSender, UnboundedSender, UnboundedReceiver) -> MockChannel, + { + // Prepare all the configuration + let endpoint = EndpointId { cluster: "test_cluster".into(), endpoint: Authority::from_static("10.0.0.1:8080") }; + let cluster_config = ClusterHealthCheck::new( + Duration::from_secs(1), + Duration::from_secs(0), + unhealthy_threshold, + healthy_threshold, + ); + + // Prepare the channels + let (health_event_sender, health_event_receiver) = mpsc::channel::(1000); + let stop_signal = Arc::new(Notify::new()); + + // Prepare mocks + let (connections_sender, connections_receiver) = mpsc::unbounded_channel::(); + let (request_sender, request_receiver) = mpsc::unbounded_channel::(); + let (response_sender, response_receiver) = mpsc::unbounded_channel::(); + let mock_grpc_stack = channel_builder(connections_sender, request_sender, response_receiver); + + let (mock_interval_waiter, interval_sender) = MockIntervalWaiter::new(); + + TestFixture { + endpoint, + cluster_config, + protocol_config, + stop_signal, + health_event_receiver, + connections_receiver, + request_receiver, + response_sender, + interval_sender, + checker_task: CheckerTask::Ready((mock_grpc_stack, mock_interval_waiter, 
health_event_sender)), + } + } + + pub fn start(&mut self, spawner: F) + where + F: FnOnce( + EndpointId, + ClusterHealthCheck, + ProtocolConfig, + Sender, + Arc, + (MockChannel, MockIntervalWaiter), + ) -> JoinHandle>, + { + // Hack to extract the values from the enum :( + let CheckerTask::Ready((stack_builder, interval_waiter, sender)) = + std::mem::replace(&mut self.checker_task, CheckerTask::Finished) + else { + panic!("Checker task not ready to start"); + }; + self.checker_task = CheckerTask::Started(spawner( + self.endpoint.clone(), + self.cluster_config.clone(), + self.protocol_config.clone(), + sender.clone(), + Arc::clone(&self.stop_signal), + (stack_builder, interval_waiter), + )); + } + + pub async fn stop(&mut self) { + let CheckerTask::Started(task_handle) = &mut self.checker_task else { + panic!("Checker task not running"); + }; + self.stop_signal.notify_waiters(); + task_handle.await.unwrap().unwrap(); + self.checker_task = CheckerTask::Finished; + } + + pub fn enqueue_response(&self, response: Resp) { + self.response_sender.send(response).unwrap(); + } + + pub async fn connection_expected(&mut self, timeout_value: Duration) -> bool { + let Ok(Some(req)) = fast_timeout(timeout_value, self.connections_receiver.recv()).await else { + panic!("Connection not received"); + }; + req + } + + pub async fn request_expected(&mut self, timeout_value: Duration) -> Req { + let Ok(Some(req)) = fast_timeout(timeout_value, self.request_receiver.recv()).await else { + panic!("Health check not received"); + }; + req + } + + pub async fn health_update_expected( + &mut self, + expected_status: HealthStatus, + timeout_value: Duration, + ) -> EndpointHealthUpdate { + loop { + let Ok(Some(health_status)) = fast_timeout(timeout_value, self.health_event_receiver.recv()).await else { + panic!("Health check not received"); + }; + if !health_status.changed { + // ignore repeated status updates + continue; + } + assert_eq!(health_status.endpoint, self.endpoint, "Unexpected endpoint in health update"); + assert_eq!(health_status.health, expected_status, "Unexpected status in health update"); + return health_status; + } + } + + /// Advances the clock in the test, just one interval. + pub fn tick(&self) { + self.interval_sender.send(()).unwrap(); + } +} diff --git a/orion-lib/src/clusters/health/counter.rs b/orion-lib/src/clusters/health/counter.rs new file mode 100644 index 00000000..6839fd98 --- /dev/null +++ b/orion-lib/src/clusters/health/counter.rs @@ -0,0 +1,167 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+use super::HealthStatus;
+
+pub struct HealthStatusCounter {
+    status: Option<HealthStatus>,
+    checks: i32,
+    healthy_threshold: i32,
+    unhealthy_threshold: i32,
+}
+
+impl HealthStatusCounter {
+    pub fn new(healthy_threshold: u16, unhealthy_threshold: u16) -> Self {
+        Self {
+            status: None,
+            checks: 0,
+            healthy_threshold: i32::from(healthy_threshold),
+            unhealthy_threshold: -i32::from(unhealthy_threshold),
+        }
+    }
+
+    pub fn status(&self) -> Option<HealthStatus> {
+        self.status
+    }
+
+    /// Computes a new successful check. If the health status changes, it is returned.
+    pub fn add_success(&mut self) -> Option<HealthStatus> {
+        if self.checks < 0 {
+            self.checks = 0;
+        }
+        // No need to count beyond the threshold
+        if self.checks < self.healthy_threshold {
+            self.checks += 1;
+        }
+        match self.status {
+            None => {
+                // During startup, only a single successful health check is required to mark a host healthy
+                self.update(HealthStatus::Healthy)
+            },
+            Some(HealthStatus::Healthy) => None,
+            Some(HealthStatus::Unhealthy) => {
+                if self.checks >= self.healthy_threshold {
+                    self.update(HealthStatus::Healthy)
+                } else {
+                    None
+                }
+            },
+        }
+    }
+
+    /// Computes a new failed check. The unhealthy threshold is ignored and the endpoint becomes unhealthy immediately.
+    /// If the health status changes, it is returned.
+    pub fn add_failure_ignore_threshold(&mut self) -> Option<HealthStatus> {
+        self.add_failure_impl(true)
+    }
+
+    /// Computes a new failed check. If the health status changes, it is returned.
+    pub fn add_failure(&mut self) -> Option<HealthStatus> {
+        self.add_failure_impl(false)
+    }
+
+    fn add_failure_impl(&mut self, ignore_threshold: bool) -> Option<HealthStatus> {
+        if self.checks > 0 {
+            self.checks = 0;
+        }
+        if self.checks > self.unhealthy_threshold {
+            self.checks -= 1;
+        }
+        match self.status {
+            None => {
+                // During startup, a single failed health check is enough to mark a host unhealthy
+                self.update(HealthStatus::Unhealthy)
+            },
+            Some(HealthStatus::Unhealthy) => None,
+            Some(HealthStatus::Healthy) => {
+                if ignore_threshold || self.checks <= self.unhealthy_threshold {
+                    self.update(HealthStatus::Unhealthy)
+                } else {
+                    None
+                }
+            },
+        }
+    }
+
+    fn update(&mut self, new_status: HealthStatus) -> Option<HealthStatus> {
+        if Some(new_status) != self.status {
+            self.status = Some(new_status);
+        }
+        self.status
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::HealthStatus;
+    use super::HealthStatusCounter;
+
+    #[test]
+    fn health_status_counter() {
+        const HEALTHY_THRESHOLD: u16 = 5;
+        const UNHEALTHY_THRESHOLD: u16 = 10;
+        let mut counter = HealthStatusCounter::new(HEALTHY_THRESHOLD, UNHEALTHY_THRESHOLD);
+
+        assert!(counter.status().is_none(), "Expected unknown health status");
+
+        assert_eq!(counter.add_success(), Some(HealthStatus::Healthy), "Expected update on first healthy check");
+        assert_eq!(counter.status(), Some(HealthStatus::Healthy), "Expected healthy status");
+
+        // Saturating the healthy state should not cause any new updates
+        for _ in 0..HEALTHY_THRESHOLD * 2 {
+            assert!(counter.add_success().is_none(), "Unexpected update");
+            assert_eq!(counter.status(), Some(HealthStatus::Healthy), "Expected healthy status");
+        }
+
+        // Failed checks below unhealthy threshold should not cause a transition
+        for _ in 0..UNHEALTHY_THRESHOLD - 1 {
+            assert!(counter.add_failure().is_none(), "Unexpected update");
+            assert_eq!(counter.status(), Some(HealthStatus::Healthy), "Expected healthy status");
+        }
+
+        // Crossing the unhealthy threshold
+        assert_eq!(counter.add_failure(), Some(HealthStatus::Unhealthy), "Expected update about unhealthy state");
+        assert_eq!(counter.status(), Some(HealthStatus::Unhealthy), "Expected unhealthy status");
+
+        // Saturating the unhealthy state should not cause any new updates
+        for _ in 0..UNHEALTHY_THRESHOLD * 2 {
+            assert!(counter.add_failure().is_none(), "Unexpected update");
+            assert_eq!(counter.status(), Some(HealthStatus::Unhealthy), "Expected unhealthy status");
+        }
+
+        // Successful checks below healthy threshold should not cause a transition
+        for _ in 0..HEALTHY_THRESHOLD - 1 {
+            assert!(counter.add_success().is_none(), "Unexpected update");
+            assert_eq!(counter.status(), Some(HealthStatus::Unhealthy), "Expected unhealthy status");
+        }
+
+        // Crossing the healthy threshold
+        assert_eq!(counter.add_success(), Some(HealthStatus::Healthy), "Expected update about healthy state");
+        assert_eq!(counter.status(), Some(HealthStatus::Healthy), "Expected healthy status");
+
+        // A single failed check ignoring the threshold should cause an immediate transition
+        assert_eq!(
+            counter.add_failure_ignore_threshold(),
+            Some(HealthStatus::Unhealthy),
+            "Expected update about unhealthy state"
+        );
+        assert_eq!(counter.status(), Some(HealthStatus::Unhealthy), "Expected unhealthy status");
+    }
+}
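To make the threshold arithmetic above concrete, here is a minimal usage sketch (the thresholds are chosen arbitrarily, and the checker loop that would normally drive this is elided):

#[test]
fn counter_thresholds_sketch() {
    // Healthy after 2 consecutive successes, unhealthy after 3 consecutive failures.
    let mut counter = HealthStatusCounter::new(2, 3);

    // The very first success marks the endpoint healthy immediately.
    assert_eq!(counter.add_success(), Some(HealthStatus::Healthy));

    // Two failures stay below the unhealthy threshold: no transition is reported.
    assert_eq!(counter.add_failure(), None);
    assert_eq!(counter.add_failure(), None);

    // The third consecutive failure crosses the threshold.
    assert_eq!(counter.add_failure(), Some(HealthStatus::Unhealthy));

    // Recovery requires the full healthy threshold of consecutive successes.
    assert_eq!(counter.add_success(), None);
    assert_eq!(counter.add_success(), Some(HealthStatus::Healthy));
}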
diff --git a/orion-lib/src/clusters/health/manager.rs b/orion-lib/src/clusters/health/manager.rs
new file mode 100644
index 00000000..21771df4
--- /dev/null
+++ b/orion-lib/src/clusters/health/manager.rs
@@ -0,0 +1,143 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::collections::HashMap;
+
+use orion_configuration::config::cluster::health_check::HealthCheckProtocol;
+use orion_configuration::config::cluster::HealthCheck;
+use tokio::sync::mpsc;
+
+use crate::clusters::cluster::{ClusterOps, ClusterType};
+use crate::clusters::clusters_manager;
+use crate::clusters::health::checkers::EndpointHealthChecker;
+use crate::clusters::health::EndpointHealthUpdate;
+
+use super::EndpointId;
+
+pub struct HealthCheckManager {
+    /// This sender is kept here so it can be cloned for every newly spawned health checker.
+    updates_from_checkers_sender: mpsc::Sender<EndpointHealthUpdate>,
+    checkers: HashMap<String, Vec<EndpointHealthChecker>>,
+}
+
+impl HealthCheckManager {
+    pub fn new(updates_from_checkers_sender: mpsc::Sender<EndpointHealthUpdate>) -> Self {
+        HealthCheckManager { updates_from_checkers_sender, checkers: HashMap::new() }
+    }
+
+    pub async fn stop_all(&mut self) {
+        for checker in self.checkers.drain().flat_map(|(_, checkers)| checkers.into_iter()) {
+            checker.stop().await;
+        }
+    }
+
+    pub async fn restart_cluster(&mut self, cluster_config: ClusterType) {
+        let cluster_name = cluster_config.get_name().clone();
+        self.stop_cluster(&cluster_name).await;
+        if let Some(health_check_config) = cluster_config.into_health_check() {
+            let HealthCheck { cluster: cluster_config, protocol } = health_check_config;
+
+            let checkers = self.checkers.entry(cluster_name.to_string()).or_default();
+
+            match protocol {
+                HealthCheckProtocol::Http(http_config) => {
+                    let Ok(endpoints) = clusters_manager::all_http_connections(&cluster_name) else {
+                        return;
+                    };
+
+                    for (authority, channel) in endpoints {
+                        let endpoint_id = EndpointId { cluster: cluster_name.to_string(), endpoint: authority };
+
+                        let new_checker = EndpointHealthChecker::try_new_http(
+                            endpoint_id.clone(),
+                            cluster_config.clone(),
+                            http_config.clone(),
+                            channel,
+                            self.updates_from_checkers_sender.clone(),
+                        );
+
+                        match new_checker {
+                            Ok(checker) => {
+                                checkers.push(checker);
+                            },
+                            Err(err) => tracing::warn!(
+                                "Could not start new HTTP health checker for endpoint {} in cluster {}: {}",
+                                endpoint_id.endpoint,
+                                endpoint_id.cluster,
+                                err
+                            ),
+                        }
+                    }
+                },
+                HealthCheckProtocol::Tcp(tcp_config) => {
+                    let Ok(endpoints) = clusters_manager::all_tcp_connections(&cluster_name) else {
+                        return;
+                    };
+
+                    for (authority, channel) in endpoints {
+                        let endpoint_id = EndpointId { cluster: cluster_name.to_string(), endpoint: authority };
+
+                        checkers.push(EndpointHealthChecker::new_tcp(
+                            endpoint_id.clone(),
+                            cluster_config.clone(),
+                            tcp_config.clone(),
+                            channel,
+                            self.updates_from_checkers_sender.clone(),
+                        ));
+                    }
+                },
+                HealthCheckProtocol::Grpc(grpc_config) => {
+                    let Ok(endpoints) = clusters_manager::all_grpc_connections(&cluster_name) else {
+                        return;
+                    };
+
+                    for endpoint_result in endpoints {
+                        let (endpoint, channel) = match endpoint_result {
+                            Ok(result) => result,
+                            Err(err) => {
+                                tracing::error!("Failed to obtain gRPC client in cluster {}: {}", cluster_name, err);
+                                continue;
+                            },
+                        };
+
+                        let endpoint_id = EndpointId { cluster: cluster_name.to_string(), endpoint };
+
+                        checkers.push(EndpointHealthChecker::new_grpc(
+                            endpoint_id.clone(),
+                            cluster_config.clone(),
+                            grpc_config.clone(),
+                            channel,
+                            self.updates_from_checkers_sender.clone(),
+                        ));
+                    }
+                },
+            }
+        }
+    }
+
+    pub async fn stop_cluster(&mut self, cluster: &str) {
+        let Some(endpoints_in_the_cluster) = self.checkers.remove(cluster) else {
+            return;
+        };
+        for checker in endpoints_in_the_cluster {
+            checker.stop().await;
+        }
+    }
+}
diff --git a/orion-lib/src/clusters/health/mod.rs b/orion-lib/src/clusters/health/mod.rs
new file mode 100644
index 00000000..7861f707
--- /dev/null
+++ b/orion-lib/src/clusters/health/mod.rs
@@ -0,0 +1,74 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +mod checkers; +mod counter; +mod manager; + +use http::uri::Authority; + +pub use manager::HealthCheckManager; +pub use orion_configuration::config::cluster::HealthStatus; + +#[derive(Clone, Copy, PartialEq)] +pub enum ValueUpdated { + Updated, + NotUpdated, +} + +pub trait EndpointHealth { + fn health(&self) -> HealthStatus; + fn update_health(&mut self, health: HealthStatus) -> ValueUpdated; + fn is_healthy(&self) -> bool { + self.health() == HealthStatus::Healthy + } +} + +impl EndpointHealth for HealthStatus { + fn health(&self) -> HealthStatus { + *self + } + + fn update_health(&mut self, health_status: Self) -> ValueUpdated { + if *self == health_status { + ValueUpdated::NotUpdated + } else { + *self = health_status; + ValueUpdated::Updated + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct EndpointId { + pub cluster: String, + pub endpoint: Authority, +} + +#[derive(Clone, Debug)] +pub struct EndpointHealthUpdate { + /// The endpoint whose health has been checked. + pub endpoint: EndpointId, + /// The health status. + pub health: HealthStatus, + /// This is `true` if the health status has changed since the last update + /// and `false` if it is repeated. + pub changed: bool, +} diff --git a/orion-lib/src/clusters/load_assignment.rs b/orion-lib/src/clusters/load_assignment.rs new file mode 100644 index 00000000..15d559e7 --- /dev/null +++ b/orion-lib/src/clusters/load_assignment.rs @@ -0,0 +1,507 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// + +use std::{sync::Arc, time::Duration}; + +use compact_str::CompactString; +use futures::future::BoxFuture; +use http::uri::Authority; +use orion_configuration::config::cluster::{ + ClusterLoadAssignment as ClusterLoadAssignmentConfig, HealthStatus, HttpProtocolOptions, + LbEndpoint as LbEndpointConfig, LbPolicy, LocalityLbEndpoints as LocalityLbEndpointsConfig, +}; +use rustls::ClientConfig; +use tokio::net::TcpStream; +use tracing::debug; +use typed_builder::TypedBuilder; +use webpki::types::ServerName; + +use super::{ + balancers::{ + hash_policy::HashState, least::WeightedLeastRequestBalancer, maglev::MaglevBalancer, random::RandomBalancer, + ring::RingHashBalancer, wrr::WeightedRoundRobinBalancer, Balancer, DefaultBalancer, EndpointWithAuthority, + EndpointWithLoad, WeightedEndpoint, + }, + // cluster::HyperService, + health::{EndpointHealth, ValueUpdated}, +}; +use crate::{ + secrets::{TlsConfigurator, WantsToBuildClient}, + transport::{ + bind_device::BindDevice, connector::ConnectError, GrpcService, HttpChannel, HttpChannelBuilder, TcpChannel, + }, + Result, +}; + +#[derive(Debug, Clone)] +pub struct LbEndpoint { + pub name: CompactString, + pub authority: http::uri::Authority, + pub bind_device: Option, + pub weight: u32, + pub health_status: HealthStatus, + http_channel: HttpChannel, + tcp_channel: TcpChannel, +} + +impl PartialEq for LbEndpoint { + fn eq(&self, other: &Self) -> bool { + self.authority == other.authority + } +} + +impl WeightedEndpoint for LbEndpoint { + fn weight(&self) -> u32 { + self.weight + } +} + +impl EndpointWithAuthority for LbEndpoint { + fn authority(&self) -> &Authority { + &self.authority + } +} + +impl Eq for LbEndpoint {} + +impl PartialOrd for LbEndpoint { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl Ord for LbEndpoint { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.authority.as_str().cmp(other.authority.as_str()) + } +} + +impl EndpointHealth for LbEndpoint { + fn health(&self) -> HealthStatus { + self.health_status + } + + fn update_health(&mut self, health: HealthStatus) -> ValueUpdated { + self.health_status.update_health(health) + } +} + +impl LbEndpoint { + pub fn grpc_service(&self) -> Result { + GrpcService::try_new(self.http_channel.clone(), self.authority.clone()) + } +} + +#[derive(Debug, Clone)] +pub struct PartialLbEndpoint { + pub authority: http::uri::Authority, + pub bind_device: Option, + pub weight: u32, + pub health_status: HealthStatus, +} + +impl PartialLbEndpoint { + fn new(value: &LbEndpoint) -> Self { + PartialLbEndpoint { + authority: value.authority.clone(), + bind_device: value.bind_device.clone(), + weight: value.weight, + health_status: value.health_status, + } + } +} + +impl EndpointWithLoad for LbEndpoint { + fn http_load(&self) -> u32 { + self.http_channel.load() + } +} + +#[derive(Debug, Clone, TypedBuilder)] +#[builder(build_method(vis="", name=prepare), field_defaults(setter(prefix = "with_")))] +struct LbEndpointBuilder { + cluster_name: CompactString, + endpoint: PartialLbEndpoint, + http_protocol_options: HttpProtocolOptions, + tls_configurator: Option>, + #[builder(default)] + server_name: Option>, + connect_timeout: Option, +} + +impl LbEndpointBuilder { + #[must_use] + fn replace_bind_device(mut self, bind_device: Option) -> Self { + self.endpoint.bind_device = bind_device; + self + } + + pub fn build(self) -> Result> { + let cluster_name = self.cluster_name; + let PartialLbEndpoint { authority, bind_device, weight, health_status } = 
self.endpoint; + + let builder = HttpChannelBuilder::new(bind_device.clone()) + .with_authority(authority.clone()) + .with_timeout(self.connect_timeout); + let builder = if let Some(tls_conf) = self.tls_configurator { + if let Some(server_name) = self.server_name { + builder.with_tls(tls_conf).with_server_name(server_name) + } else { + builder.with_tls(tls_conf) + } + } else { + builder + }; + let http_channel = builder.with_http_protocol_options(self.http_protocol_options).build()?; + let tcp_channel = TcpChannel::new(&authority, bind_device.clone(), self.connect_timeout); + + Ok(Arc::new(LbEndpoint { + name: cluster_name, + authority, + bind_device, + weight, + health_status, + http_channel, + tcp_channel, + })) + } +} + +impl TryFrom for PartialLbEndpoint { + type Error = crate::Error; + + fn try_from(lb_endpoint: LbEndpointConfig) -> Result { + let health_status = lb_endpoint.health_status; + let address = lb_endpoint.address; + let authority = http::uri::Authority::try_from(format!("{address}"))?; + let weight = lb_endpoint.load_balancing_weight.into(); + Ok(PartialLbEndpoint { authority, bind_device: None, weight, health_status }) + } +} + +#[derive(Debug, Clone, Default)] +pub struct LocalityLbEndpoints { + pub name: CompactString, + pub endpoints: Vec>, + pub priority: u32, + pub healthy_endpoints: u32, + pub total_endpoints: u32, + pub tls_configurator: Option>, + pub http_protocol_options: HttpProtocolOptions, + pub connection_timeout: Option, +} +impl LocalityLbEndpoints { + fn rebuild(self) -> Result { + let endpoints = self + .endpoints + .into_iter() + .map(|e| { + LbEndpointBuilder::builder() + .with_cluster_name(self.name.clone()) + .with_http_protocol_options(self.http_protocol_options.clone()) + .with_connect_timeout(self.connection_timeout) + .with_tls_configurator(self.tls_configurator.clone()) + .with_endpoint(PartialLbEndpoint::new(&e)) + .prepare() + .build() + }) + .collect::>()?; + + Ok(Self { endpoints, ..self }) + } +} + +#[derive(Debug, Clone, Default)] +pub struct PartialLocalityLbEndpoints { + endpoints: Vec, + pub priority: u32, +} +#[derive(Debug, Clone, Default, TypedBuilder)] +#[builder(build_method(vis="", name=prepare), field_defaults(setter(prefix = "with_")))] +pub struct LocalityLbEndpointsBuilder { + cluster_name: CompactString, + bind_device: Option, + endpoints: PartialLocalityLbEndpoints, + http_protocol_options: HttpProtocolOptions, + tls_configurator: Option>, + server_name: Option>, + connection_timeout: Option, +} + +impl LocalityLbEndpointsBuilder { + pub fn build(self) -> Result { + let cluster_name = self.cluster_name; + let PartialLocalityLbEndpoints { endpoints, priority } = self.endpoints; + + let endpoints: Vec> = endpoints + .into_iter() + .map(|e| { + let server_name = self.tls_configurator.as_ref().and(self.server_name.clone()); + + LbEndpointBuilder::builder() + .with_endpoint(e) + .with_cluster_name(cluster_name.clone()) + .with_connect_timeout(self.connection_timeout) + .with_tls_configurator(self.tls_configurator.clone()) + .with_server_name(server_name) + .with_http_protocol_options(self.http_protocol_options.clone()) + .prepare() + .replace_bind_device(self.bind_device.clone()) + .build() + }) + .collect::>()?; + // we divide by 100 because we multiply by 100 later to calculate a percentage + if endpoints.len() > (u32::MAX / 100) as usize { + return Err("Too many endpoints".into()); + } + let healthy_endpoints = endpoints.iter().filter(|e| e.health_status.is_healthy()).count() as u32; + let total_endpoints = endpoints.len() as 
u32; + + Ok(LocalityLbEndpoints { + name: cluster_name, + endpoints, + priority, + healthy_endpoints, + total_endpoints, + tls_configurator: self.tls_configurator, + http_protocol_options: self.http_protocol_options, + connection_timeout: self.connection_timeout, + }) + } +} + +impl TryFrom for PartialLocalityLbEndpoints { + type Error = crate::Error; + + fn try_from(value: LocalityLbEndpointsConfig) -> Result { + let endpoints = value.lb_endpoints.into_iter().map(PartialLbEndpoint::try_from).collect::>()?; + let priority = value.priority; + Ok(PartialLocalityLbEndpoints { priority, endpoints }) + } +} + +#[derive(Debug, Clone)] +pub enum BalancerType { + RoundRobin(DefaultBalancer, LbEndpoint>), + Random(DefaultBalancer, LbEndpoint>), + LeastRequests(DefaultBalancer, LbEndpoint>), + RingHash(DefaultBalancer, LbEndpoint>), + Maglev(DefaultBalancer, LbEndpoint>), +} + +impl BalancerType { + pub fn update_health(&mut self, endpoint: &LbEndpoint, health: HealthStatus) -> Result { + match self { + BalancerType::RoundRobin(balancer) => balancer.update_health(endpoint, health), + BalancerType::Random(balancer) => balancer.update_health(endpoint, health), + BalancerType::LeastRequests(balancer) => balancer.update_health(endpoint, health), + BalancerType::RingHash(balancer) => balancer.update_health(endpoint, health), + BalancerType::Maglev(balancer) => balancer.update_health(endpoint, health), + } + } + fn next_item(&mut self, maybe_hash: Option) -> Option> { + match self { + BalancerType::RoundRobin(balancer) => balancer.next_item(None), + BalancerType::Random(balancer) => balancer.next_item(None), + BalancerType::LeastRequests(balancer) => balancer.next_item(None), + BalancerType::RingHash(balancer) => balancer.next_item(maybe_hash.and_then(HashState::compute)), + BalancerType::Maglev(balancer) => balancer.next_item(maybe_hash.and_then(HashState::compute)), + } + } +} + +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct ClusterLoadAssignment { + cluster_name: CompactString, + pub tls_configurator: Option>, + protocol_options: HttpProtocolOptions, + balancer: BalancerType, + pub endpoints: Vec, +} + +#[derive(Debug, Clone)] +pub struct PartialClusterLoadAssignment { + endpoints: Vec, +} + +impl ClusterLoadAssignment { + pub fn get_http_channel(&mut self, hash: HashState) -> Result { + let endpoint = self.balancer.next_item(Some(hash)).ok_or("No active endpoint")?; + Ok(endpoint.http_channel.clone()) + } + + pub fn get_tcp_channel(&mut self) -> Result>> { + let endpoint = self.balancer.next_item(None).ok_or("No active endpoint")?; + Ok(endpoint.tcp_channel.connect()) + } + + pub fn get_grpc_channel(&mut self) -> Result { + let endpoint = self.balancer.next_item(None).ok_or("No active endpoint")?; + endpoint.grpc_service() + } + + pub fn all_http_channels(&self) -> Vec<(Authority, HttpChannel)> { + self.all_endpoints_iter().map(|endpoint| (endpoint.authority.clone(), endpoint.http_channel.clone())).collect() + } + + pub fn all_tcp_channels(&self) -> Vec<(Authority, TcpChannel)> { + self.all_endpoints_iter().map(|endpoint| (endpoint.authority.clone(), endpoint.tcp_channel.clone())).collect() + } + + pub fn try_all_grpc_channels(&self) -> Vec> { + self.all_endpoints_iter() + .map(|endpoint| endpoint.grpc_service().map(|channel| (endpoint.authority.clone(), channel))) + .collect() + } + + pub fn update_endpoint_health(&mut self, authority: &http::uri::Authority, health: HealthStatus) { + for locality in &self.endpoints { + locality.endpoints.iter().filter(|endpoint| &endpoint.authority == 
authority).for_each(|endpoint| { + if let Err(err) = self.balancer.update_health(endpoint, health) { + debug!("Could not update endpoint health: {}", err); + } + }); + } + } + + pub fn rebuild(self) -> Result { + let endpoints = self + .endpoints + .into_iter() + .map(|mut e| { + e.tls_configurator.clone_from(&self.tls_configurator); + e.rebuild() + }) + .collect::>>()?; + let balancer = self.balancer; + Ok(Self { endpoints, balancer, ..self }) + } + + fn all_endpoints_iter(&self) -> impl Iterator { + self.endpoints.iter().flat_map(|locality_endpoints| &locality_endpoints.endpoints).map(Arc::as_ref) + } +} + +#[derive(Debug, Clone, TypedBuilder)] +#[builder(build_method(vis="pub(crate)", name=prepare), field_defaults(setter(prefix = "with_")))] +pub struct ClusterLoadAssignmentBuilder { + cluster_name: CompactString, + cla: PartialClusterLoadAssignment, + bind_device: Option, + #[builder(default)] + protocol_options: Option, + lb_policy: LbPolicy, + tls_configurator: Option>, + #[builder(default)] + server_name: Option>, + #[builder(default)] + connection_timeout: Option, +} + +impl ClusterLoadAssignmentBuilder { + pub fn build(self) -> Result { + let cluster_name = self.cluster_name; + let protocol_options = self.protocol_options.unwrap_or_default(); + + let PartialClusterLoadAssignment { endpoints } = self.cla; + + let endpoints = endpoints + .into_iter() + .map(|e| { + let server_name = self.tls_configurator.as_ref().and(self.server_name.clone()); + + LocalityLbEndpointsBuilder::builder() + .with_cluster_name(cluster_name.clone()) + .with_endpoints(e) + .with_bind_device(self.bind_device.clone()) + .with_connection_timeout(self.connection_timeout) + .with_tls_configurator(self.tls_configurator.clone()) + .with_server_name(server_name) + .with_http_protocol_options(protocol_options.clone()) + .prepare() + .build() + }) + .collect::>>()?; + + let balancer = match self.lb_policy { + LbPolicy::Random => BalancerType::Random(DefaultBalancer::from_slice(&endpoints)), + LbPolicy::RoundRobin => BalancerType::RoundRobin(DefaultBalancer::from_slice(&endpoints)), + LbPolicy::LeastRequest => BalancerType::LeastRequests(DefaultBalancer::from_slice(&endpoints)), + LbPolicy::RingHash => BalancerType::RingHash(DefaultBalancer::from_slice(&endpoints)), + LbPolicy::Maglev => BalancerType::Maglev(DefaultBalancer::from_slice(&endpoints)), + }; + + Ok(ClusterLoadAssignment { + cluster_name, + protocol_options, + balancer, + tls_configurator: self.tls_configurator, + endpoints, + }) + } +} + +impl TryFrom for PartialClusterLoadAssignment { + type Error = crate::Error; + fn try_from(cla: ClusterLoadAssignmentConfig) -> Result { + let endpoints: Vec<_> = + cla.endpoints.into_iter().map(PartialLocalityLbEndpoints::try_from).collect::>()?; + + if endpoints.is_empty() { + return Err("At least one locality must be specified".into()); + } + + Ok(Self { endpoints }) + } +} + +#[cfg(test)] +mod test { + use compact_str::ToCompactString; + use http::uri::Authority; + + use super::LbEndpoint; + use crate::clusters::health::HealthStatus; + use crate::transport::{bind_device::BindDevice, HttpChannelBuilder, TcpChannel}; + + impl LbEndpoint { + /// This function is used by unit tests in other modules + pub fn new( + authority: Authority, + bind_device: Option, + weight: u32, + health_status: HealthStatus, + ) -> Self { + let http_channel = + HttpChannelBuilder::new(bind_device.clone()).with_authority(authority.clone()).build().unwrap(); + let tcp_channel = TcpChannel::new(&authority, bind_device.clone(), None); + + Self { 
+ name: "Cluster".to_compact_string(), + authority, + bind_device, + weight, + health_status, + http_channel, + tcp_channel, + } + } + } +} diff --git a/orion-lib/src/clusters/mod.rs b/orion-lib/src/clusters/mod.rs new file mode 100644 index 00000000..21399681 --- /dev/null +++ b/orion-lib/src/clusters/mod.rs @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +pub(crate) mod balancers; +pub(crate) mod cached_watch; +pub(crate) mod cluster; +pub(crate) mod clusters_manager; +pub(crate) mod health; +pub(crate) mod load_assignment; +pub(crate) mod retry_policy; +pub use crate::transport::GrpcService; +pub use load_assignment::{ClusterLoadAssignmentBuilder, PartialClusterLoadAssignment}; + +pub use clusters_manager::{ + add_cluster, change_cluster_load_assignment, get_grpc_connection, remove_cluster, remove_cluster_load_assignment, + update_endpoint_health, update_tls_context, +}; diff --git a/orion-lib/src/clusters/retry_policy.rs b/orion-lib/src/clusters/retry_policy.rs new file mode 100644 index 00000000..4a6a8a38 --- /dev/null +++ b/orion-lib/src/clusters/retry_policy.rs @@ -0,0 +1,224 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+use http::Response;
+use http_body::Body;
+
+use orion_configuration::config::network_filters::http_connection_manager::{RetryOn, RetryPolicy};
+
+use tokio::time::error::Elapsed;
+
+use crate::Error as BoxError;
+use std::error::Error;
+
+const X_ENVOY_RATELIMITED: &str = "x-envoy-ratelimited";
+const X_ORION_RATELIMITED: &str = "x-orion-ratelimited";
+
+#[derive(Clone, Copy, Debug, thiserror::Error)]
+pub enum EventError {
+    #[error("ConnectFailure")]
+    ConnectFailure,
+    #[error("ConnectTimeout")]
+    ConnectTimeout,
+    #[error("PerTryTimeout")]
+    PerTryTimeout,
+    #[error("RouteTimeout")]
+    RouteTimeout,
+    #[error("Reset")]
+    Reset,
+    #[error("RefusedStream")]
+    RefusedStream,
+    #[allow(unused)]
+    #[error("Http3PostConnectFailure")]
+    Http3PostConnectFailure,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum FailureKind<'a, B> {
+    Event(EventError),
+    EligibleForRetry(&'a Response<B>),
+}
+
+impl<'a, B: Body> FailureKind<'a, B> {
+    pub fn inner_response(&self) -> Option<&Response<B>> {
+        if let FailureKind::EligibleForRetry(resp) = self {
+            Some(resp)
+        } else {
+            None
+        }
+    }
+
+    pub fn try_infer(result: &'a Result<Response<B>, BoxError>) -> Option<FailureKind<'a, B>> {
+        match result {
+            Ok(ref resp) => {
+                // NOTE: a priori exclude the evaluation of the retry policy for 1xx and 2xx responses.
+                if resp.status().is_informational() || resp.status().is_success() {
+                    return None;
+                }
+                return Some(FailureKind::EligibleForRetry(resp));
+            },
+            Err(err) => Self::try_infer_from_error(err.as_ref()),
+        }
+    }
+
+    pub fn try_infer_from_error(err: &'a (dyn std::error::Error + 'static)) -> Option<FailureKind<'a, B>> {
+        if let Some(h_err) = err.downcast_ref::() {
+            if let Some(source) = h_err.source() {
+                return Self::try_infer_from_error(source);
+            }
+        }
+
+        if let Some(h_err) = err.downcast_ref::() {
+            if let Some(source) = h_err.source() {
+                return Self::try_infer_from_error(source);
+            }
+        }
+
+        if err.downcast_ref::<Elapsed>().is_some() {
+            // Note: this should never happen, as the user should remap the Tokio timeout
+            // to a suitable EventError (e.g., timeout(dur, fut).await.map_err(|_| EventError::ConnectTimeout)).
+            // Just in case, the PerTryTimeout error is the closest one we can choose.
+            return Some(FailureKind::Event(EventError::PerTryTimeout));
+        }
+
+        if let Some(failure) = err.downcast_ref::<EventError>() {
+            return Some(FailureKind::Event(*failure));
+        }
+
+        if let Some(h2_reason) = err.downcast_ref::<h2::Error>().and_then(h2::Error::reason) {
+            match h2_reason {
+                h2::Reason::REFUSED_STREAM => return Some(FailureKind::Event(EventError::RefusedStream)),
+                h2::Reason::CONNECT_ERROR => return Some(FailureKind::Event(EventError::ConnectFailure)),
+                _ => return Some(FailureKind::Event(EventError::Reset)),
+            }
+        }
+
+        if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
+            match io_err.kind() {
+                std::io::ErrorKind::ConnectionRefused | std::io::ErrorKind::NotConnected => {
+                    return Some(FailureKind::Event(EventError::ConnectFailure))
+                },
+                _ => return Some(FailureKind::Event(EventError::Reset)),
+            }
+        }
+
+        // the rest of the errors are remapped to Reset
+        Some(FailureKind::Event(EventError::Reset))
+    }
+}
+
+pub fn should_retry<B: Body>(retry_policy: &RetryPolicy, event: &FailureKind<'_, B>) -> bool {
+    let response_event = event.inner_response();
+
+    if let FailureKind::Event(EventError::PerTryTimeout) = event {
+        return true;
+    }
+
+    for policy in &retry_policy.retry_on {
+        match policy {
+            RetryOn::Err5xx => {
+                if let Some(resp) = response_event {
+                    let status = resp.status();
+                    if (500..=599).contains(&status.as_u16()) {
+                        return true;
+                    }
+                }
+            },
+            RetryOn::GatewayError => {
+                if let Some(resp) = response_event {
+                    let status = resp.status();
+                    if (502..=504).contains(&status.as_u16()) {
+                        return true;
+                    }
+                }
+            },
+            RetryOn::EnvoyRateLimited => {
+                if let Some(resp) = response_event {
+                    if resp
+                        .headers()
+                        .iter()
+                        .any(|(name, _)| name.as_str() == X_ENVOY_RATELIMITED || name.as_str() == X_ORION_RATELIMITED)
+                    {
+                        return true;
+                    }
+                }
+            },
+            RetryOn::Retriable4xx => {
+                if let Some(resp) = response_event {
+                    let status = resp.status();
+                    if status.as_u16() == 409 {
+                        return true;
+                    }
+                }
+            },
+            RetryOn::RetriableStatusCodes => {
+                if let Some(resp) = response_event {
+                    return retry_policy.retriable_status_codes.contains(&resp.status());
+                }
+            },
+            RetryOn::RetriableHeaders => {
+                if let Some(resp) = response_event {
+                    return retry_policy.retriable_headers.iter().any(|hm| hm.response_matches(resp));
+                }
+            },
+            RetryOn::Reset => {
+                if let FailureKind::Event(EventError::Reset) = event {
+                    return true;
+                }
+            },
+            RetryOn::ConnectFailure => {
+                if let FailureKind::Event(EventError::ConnectFailure | EventError::ConnectTimeout) = event {
+                    return true;
+                }
+            },
+            RetryOn::RefusedStream => {
+                if let FailureKind::Event(EventError::RefusedStream) = event {
+                    return true;
+                }
+            },
+            RetryOn::Http3PostConnectFailure => {
+                if let FailureKind::Event(EventError::Http3PostConnectFailure) = event {
+                    return true;
+                }
+            },
+        }
+    }
+    false
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+
+    #[test]
+    fn retry_on() {
+        assert_eq!("5xx".parse::<RetryOn>().unwrap(), RetryOn::Err5xx);
+        assert_eq!("gateway-error".parse::<RetryOn>().unwrap(), RetryOn::GatewayError);
+        assert_eq!("reset".parse::<RetryOn>().unwrap(), RetryOn::Reset);
+        assert_eq!("connect-failure".parse::<RetryOn>().unwrap(), RetryOn::ConnectFailure);
+        assert_eq!("envoy-ratelimited".parse::<RetryOn>().unwrap(), RetryOn::EnvoyRateLimited);
+        assert_eq!("retriable-4xx".parse::<RetryOn>().unwrap(), RetryOn::Retriable4xx);
+        assert_eq!("refused-stream".parse::<RetryOn>().unwrap(), RetryOn::RefusedStream);
+        assert_eq!("retriable-status-codes".parse::<RetryOn>().unwrap(), RetryOn::RetriableStatusCodes);
+        assert_eq!("retriable-headers".parse::<RetryOn>().unwrap(), RetryOn::RetriableHeaders);
+        assert!("unknown".parse::<RetryOn>().is_err());
+    }
+}
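As a usage sketch of the two halves above (error classification via `FailureKind::try_infer`, then policy evaluation via `should_retry`), a caller on the proxy side might look roughly like this, assuming a `RetryPolicy` already parsed from configuration:

// Sketch only: `policy` comes from configuration, `result` from one upstream attempt.
fn retry_needed<B: http_body::Body>(
    policy: &RetryPolicy,
    result: &Result<http::Response<B>, BoxError>,
) -> bool {
    // 1xx/2xx responses yield no FailureKind and are never retried.
    match FailureKind::try_infer(result) {
        Some(failure) => should_retry(policy, &failure),
        None => false,
    }
}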
diff --git a/orion-lib/src/configuration/mod.rs b/orion-lib/src/configuration/mod.rs
new file mode 100644
index 00000000..9089ae32
--- /dev/null
+++ b/orion-lib/src/configuration/mod.rs
@@ -0,0 +1,51 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use orion_configuration::config::bootstrap::Bootstrap;
+
+use crate::{
+    clusters::cluster::PartialClusterType, listeners::listener::ListenerFactory, ConversionContext, Error, Result,
+    SecretManager,
+};
+
+pub fn get_listeners_and_clusters(
+    bootstrap: Bootstrap,
+) -> Result<(SecretManager, Vec<ListenerFactory>, Vec<PartialClusterType>)> {
+    let static_resources = bootstrap.static_resources;
+    let secrets = static_resources.secrets;
+    let mut secret_manager = SecretManager::new();
+    secrets.into_iter().try_for_each(|secret| secret_manager.add(secret).map(|_| ()))?;
+
+    let listeners = static_resources
+        .listeners
+        .into_iter()
+        .map(|l| ListenerFactory::try_from(ConversionContext::new((l, &secret_manager))))
+        .collect::<Result<Vec<_>>>()?;
+    let clusters = static_resources
+        .clusters
+        .into_iter()
+        .map(|c| PartialClusterType::try_from((c, &secret_manager)))
+        .collect::<Result<Vec<_>>>()?;
+    if clusters.is_empty() {
+        // shouldn't happen with the new config
+        return Err::<(SecretManager, Vec<_>, Vec<_>), Error>("No clusters configured".into());
+    }
+    Ok((secret_manager, listeners, clusters))
+}
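A short usage sketch of `get_listeners_and_clusters`, assuming a `bootstrap: Bootstrap` deserialized elsewhere (e.g. from a bootstrap YAML) and a fallible setup function around it:

// Sketch only: split the static resources into the pieces the runtime needs.
let (secret_manager, listener_factories, partial_clusters) = get_listeners_and_clusters(bootstrap)?;
tracing::info!("loaded {} listeners and {} clusters", listener_factories.len(), partial_clusters.len());
let _ = secret_manager; // kept around for later TLS/secret lookups (illustrative)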
diff --git a/orion-lib/src/lib.rs b/orion-lib/src/lib.rs
new file mode 100644
index 00000000..75c88639
--- /dev/null
+++ b/orion-lib/src/lib.rs
@@ -0,0 +1,123 @@
+#![recursion_limit = "128"]
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub mod configuration;
+
+pub mod clusters;
+mod listeners;
+//mod observability;
+mod body;
+mod secrets;
+pub(crate) mod thread_local;
+mod transport;
+pub(crate) mod utils;
+
+use std::sync::OnceLock;
+
+use listeners::listeners_manager;
+use orion_configuration::config::Runtime;
+use tokio::{sync::mpsc, task::JoinSet};
+
+pub use crate::configuration::get_listeners_and_clusters;
+
+pub use clusters::health::{EndpointHealthUpdate, HealthCheckManager};
+pub use clusters::load_assignment::{LbEndpoint, PartialClusterLoadAssignment};
+pub use clusters::{cluster::PartialClusterType, ClusterLoadAssignmentBuilder};
+pub use listeners::listener::ListenerFactory;
+pub use listeners_manager::{ListenerConfigurationChange, ListenersManager, RouteConfigurationChange};
+pub use orion_configuration::config::network_filters::http_connection_manager::RouteConfiguration;
+pub use secrets::SecretManager;
+pub(crate) use transport::AsyncStream;
+
+pub type Error = orion_error::Error;
+pub type Result<T> = ::core::result::Result<T, Error>;
+
+pub use crate::body::poly_body::PolyBody;
+
+pub type HttpBody = PolyBody;
+
+pub static RUNTIME_CONFIG: OnceLock<Runtime> = OnceLock::new();
+
+pub fn runtime_config() -> &'static Runtime {
+    RUNTIME_CONFIG.get().expect("Called runtime_config without setting RUNTIME_CONFIG first")
+}
+
+pub struct ConversionContext<'a, T> {
+    envoy_object: T,
+    secret_manager: &'a SecretManager,
+}
+impl<'a, T> ConversionContext<'a, T> {
+    pub fn new(ctx: (T, &'a SecretManager)) -> Self {
+        Self { envoy_object: ctx.0, secret_manager: ctx.1 }
+    }
+}
+
+pub struct ConfigurationReceivers {
+    listener_configuration_receiver: mpsc::Receiver<ListenerConfigurationChange>,
+    route_configuration_receiver: mpsc::Receiver<RouteConfigurationChange>,
+}
+
+#[derive(Clone)]
+pub struct ConfigurationSenders {
+    pub listener_configuration_sender: mpsc::Sender<ListenerConfigurationChange>,
+    pub route_configuration_sender: mpsc::Sender<RouteConfigurationChange>,
+}
+
+impl ConfigurationReceivers {
+    pub fn new(
+        listener_configuration_receiver: mpsc::Receiver<ListenerConfigurationChange>,
+        route_configuration_receiver: mpsc::Receiver<RouteConfigurationChange>,
+    ) -> Self {
+        Self { listener_configuration_receiver, route_configuration_receiver }
+    }
+}
+
+impl ConfigurationSenders {
+    pub fn new(
+        listener_configuration_sender: mpsc::Sender<ListenerConfigurationChange>,
+        route_configuration_sender: mpsc::Sender<RouteConfigurationChange>,
+    ) -> Self {
+        Self { listener_configuration_sender, route_configuration_sender }
+    }
+}
+
+pub fn new_configuration_channel(capacity: usize) -> (ConfigurationSenders, ConfigurationReceivers) {
+    let (listener_tx, listener_rx) = mpsc::channel::<ListenerConfigurationChange>(capacity);
+    let (route_tx, route_rx) = mpsc::channel::<RouteConfigurationChange>(capacity);
+    (ConfigurationSenders::new(listener_tx, route_tx), ConfigurationReceivers::new(listener_rx, route_rx))
+}
+
+pub fn start_ng_on_joinset(configuration_receivers: ConfigurationReceivers) -> Result<JoinSet<Result<()>>> {
+    let ConfigurationReceivers { listener_configuration_receiver, route_configuration_receiver } =
+        configuration_receivers;
+
+    let mut set = JoinSet::new();
+
+    set.spawn(async move {
+        let listeners_manager = ListenersManager::new(listener_configuration_receiver, route_configuration_receiver);
+        if let Err(err) = listeners_manager.start().await {
+            tracing::warn!("{err}");
+        }
+        Ok(())
+    });
+
+    Ok(set)
+}
diff --git a/orion-lib/src/listeners/filterchain.rs b/orion-lib/src/listeners/filterchain.rs
new file mode 100644
index 00000000..68231eb8
--- /dev/null
+++ b/orion-lib/src/listeners/filterchain.rs
@@ -0,0 +1,382 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing
Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use super::{ + http_connection_manager::{AlpnCodecs, HttpConnectionManager, HttpConnectionManagerBuilder}, + tcp_proxy::{TcpProxy, TcpProxyBuilder}, +}; +use crate::{ + listeners::http_connection_manager::HttpHandlerRequest, + secrets::{TlsConfigurator, WantsToBuildServer}, + transport::AsyncReadWrite, + utils::TokioExecutor, + AsyncStream, ConversionContext, Error, Result, +}; +use compact_str::CompactString; +use futures::TryFutureExt; +use http::Request; +use hyper::service::Service; +use hyper_util::{rt::TokioIo, server::conn::auto::Builder as HyperServerBuilder}; +use orion_configuration::config::{ + listener::{FilterChain as FilterChainConfig, MainFilter}, + network_filters::{ + http_connection_manager::CodecType, + network_rbac::{NetworkContext, NetworkRbac}, + }, +}; +use rustls::{server::Acceptor, ServerConfig}; +use std::{net::SocketAddr, sync::Arc}; +use tokio::net::TcpStream; +use tracing::{debug, warn}; + +#[derive(Debug, Clone)] +pub struct FilterchainType { + pub config: Filterchain, + pub handler: ConnectionHandler, +} + +#[derive(Debug, Clone)] +pub enum ConnectionHandler { + Http(Arc), + Tcp(TcpProxy), +} + +#[derive(Debug, Clone)] +pub struct Filterchain { + pub name: CompactString, + pub rbac_filters: Vec, + pub tls_configurator: Option>, +} + +#[derive(Debug, Clone)] +pub enum MainFilterBuilder { + Http(HttpConnectionManagerBuilder), + Tcp(TcpProxyBuilder), +} + +impl TryFrom> for MainFilterBuilder { + type Error = crate::Error; + fn try_from(ctx: ConversionContext) -> Result { + let ConversionContext { envoy_object: main_filter, secret_manager } = ctx; + match main_filter { + MainFilter::Http(http) => { + Ok(Self::Http(HttpConnectionManagerBuilder::try_from(ConversionContext::new((http, secret_manager)))?)) + }, + MainFilter::Tcp(tcp) => Ok(Self::Tcp(tcp.into())), + } + } +} + +#[derive(Debug, Clone)] +pub struct FilterchainBuilder { + name: CompactString, + listener_name: Option, + main_filter: MainFilterBuilder, + rbac_filters: Vec, + tls_configurator: Option>, +} + +impl FilterchainBuilder { + pub fn with_listener_name(self, name: CompactString) -> Self { + FilterchainBuilder { listener_name: Some(name), ..self } + } + + pub fn build(self) -> Result { + let listener_name = self.listener_name.ok_or("listener name is not set")?; + let filterchain_name = self.name; + let config = Filterchain { + name: filterchain_name, + tls_configurator: self.tls_configurator, + rbac_filters: self.rbac_filters, + }; + let handler = match self.main_filter { + MainFilterBuilder::Http(http_connection_manager) => ConnectionHandler::Http(Arc::new( + http_connection_manager.with_listener_name(listener_name.clone()).build()?, + )), + MainFilterBuilder::Tcp(tcp_proxy) => { + ConnectionHandler::Tcp(tcp_proxy.with_listener_name(listener_name.clone()).build()?) 
+ }, + }; + Ok(FilterchainType { config, handler }) + } +} + +impl TryFrom> for FilterchainBuilder { + type Error = Error; + fn try_from(ctx: ConversionContext) -> std::result::Result { + let ConversionContext { envoy_object: filter_chain, secret_manager } = ctx; + let main_filter = ConversionContext::new((filter_chain.terminal_filter, secret_manager)).try_into()?; + let tls_config = filter_chain.tls_config; + let rbac_filters = filter_chain.rbac; + let tls_configurator = + tls_config.map(|tls_config| TlsConfigurator::try_from((tls_config, secret_manager))).transpose()?; + Ok(FilterchainBuilder { + name: filter_chain.name, + listener_name: None, + main_filter, + rbac_filters, + tls_configurator, + }) + } +} + +impl FilterchainType { + pub fn filter_chain(&self) -> &Filterchain { + &self.config + } + + pub fn apply_rbac( + &self, + stream: TcpStream, + local_addr: SocketAddr, + peer_addr: SocketAddr, + server_name: Option<&str>, + ) -> Option { + let rbac_filters = &self.filter_chain().rbac_filters; + let network_context = NetworkContext::new(local_addr, peer_addr, server_name); + for rbac in rbac_filters { + if !rbac.is_permitted(network_context) { + return None; + } + } + Some(stream) + } + + pub async fn start_filterchain(&self, stream: TcpStream) -> Result<()> { + let Self { config, handler } = self; + match handler { + ConnectionHandler::Http(http_connection_manager) => { + let req_handler = http_connection_manager.request_handler(); + // codec type as given in the listener, not alpn + let codec_type = http_connection_manager.codec_type; + let listener_name = http_connection_manager.listener_name.clone(); + let tls_configurator = config + .tls_configurator + .clone() + .map(TlsConfigurator::::into_inner); + + let peer_addr = stream.peer_addr().map_err(|e| { + warn!("{listener_name} failed to read peer address"); + format!("Failed to read peer address: {e}") + })?; + let (stream, selected_codec) = if let Some(tls_configurator) = tls_configurator { + let (stream, negotiated) = + start_tls(listener_name.clone(), stream, tls_configurator, Some(codec_type)).await?; + // if we negotiated a protocol over ALPN, use that instead of the configured CodecType. + // since we use codec_type to determine our alpn response, we will never negotiate a protocol not covered by codec_type + // if we change our config to support setting the alpn protocols from the TlsContext, we should + // update this code to make sure it doesn't do anything _too_ weird. + let selected_codec = match negotiated { + Some(AlpnCodecs::Http2) => CodecType::Http2, + Some(AlpnCodecs::Http1) => CodecType::Http1, + None => codec_type, + }; + (stream, selected_codec) + } else { + // without TLS no negotiation possible at this point + // perhaps we want to preserve auto here and do an upgrade handshake, but that's pretty messy in the code + // and only useful in the cases where the listener is not using TLS. 
+ // any deployment that does not want to do TLS to downstream, is probably already in the private network + // and would prefer prior-knowledge http2 + let stream: Box = Box::new(stream); + (stream, codec_type) + }; + + debug!("{listener_name} tried to negotiate {codec_type:?}, got {selected_codec:?}"); + let mut hyper_server = HyperServerBuilder::new(TokioExecutor); + let stream = TokioIo::new(stream); + //todo(hayley): we should be applying listener http settings here + hyper_server = match selected_codec { + CodecType::Http1 => hyper_server.http1_only(), + CodecType::Http2 => hyper_server.http2_only(), + CodecType::Auto => hyper_server, + }; + hyper_server + .serve_connection( + stream, + hyper::service::service_fn(|req: Request| { + let handler_req = HttpHandlerRequest { request: req, source_addr: peer_addr }; + req_handler.call(handler_req).map_err(|e| e.inner()) + }), + ) + .await + .inspect_err(|err| debug!("{listener_name} : HTTP connection error: {err}")) + .map_err(Error::from) + }, + ConnectionHandler::Tcp(tcp_proxy) => { + let tcp_proxy = tcp_proxy.clone(); + let listener_name = tcp_proxy.listener_name.clone(); + let server_config = config + .tls_configurator + .clone() + .map(TlsConfigurator::::into_inner); + let (stream, _alpns): (Box, Option) = + if let Some(server_config) = server_config { + start_tls(listener_name.clone(), stream, server_config, None).await? + } else { + (Box::new(stream), None) + }; + + debug!("Starting tcp proxy"); + let res = tcp_proxy.serve_connection(stream).await; + debug!("TcpProxy closed {res:?}"); + res + }, + } + } +} + +fn negotiate_codec_type<'a>(codec_type: CodecType, client_alpns: impl Iterator) -> Option { + let client_alpns = client_alpns.collect::>(); + AlpnCodecs::from_codec(codec_type) + .iter() + .find(|&&desired_proto| client_alpns.contains(&desired_proto.as_ref())) + .copied() +} + +async fn start_tls( + listener_name: CompactString, + stream: TcpStream, + mut config: ServerConfig, + codec_type: Option, +) -> Result<(AsyncStream, Option)> { + let acceptor = tokio_rustls::LazyConfigAcceptor::new(Acceptor::default(), stream); + tokio::pin!(acceptor); + match acceptor.as_mut().await { + Ok(accepted) => { + let client_hello = accepted.client_hello(); + let server_name = client_hello.server_name().unwrap_or("No Address").to_owned(); + debug!( + "{listener_name} server_name {server_name} {codec_type:?} {:?}", + client_hello + .alpn() + .map(|iter| iter.map(|i| String::from_utf8_lossy(i).into_owned()).collect::>()) + ); + + //note(hayley): here we use the CodecType (Http1, H2, Auto) to determine what alpn + // we should offer. however, envoy also has a field commonTlsContext: alpn_protocols: [h2,http/1.1] + // in the TLS config. That one should probably take precedence. + let negotiated_codec_type = match (codec_type, client_hello.alpn()) { + (Some(desired), Some(offered)) => { + if let Some(negotiated_codec_type) = negotiate_codec_type(desired, offered) { + debug!("{listener_name} Negotiated codec type {negotiated_codec_type:?}"); + // note(hayley): do we need to dynamically set this? inspecting the offer vs. desired is useful to log and configure the hyper server + // but maybe we should set this at the listener level and let rustls handle it the handshake. 
+ // since the spec says that rustls has to send a specific error if the client offers only unsupported alpn + config.alpn_protocols = vec![negotiated_codec_type.as_ref().to_owned()]; + Some(negotiated_codec_type) + } else { + // this error message could be better but is a bit of a refactor to get the names + warn!("Couldn't agree on a common codec"); + // set our alpn reply to all the protocols we tried so Rustls can gracefully reject the hello. + config.alpn_protocols = AlpnCodecs::from_codec(desired) + .iter() + .map(|alpn| alpn.as_ref().to_owned()) + .collect::>(); + None + } + }, + (Some(desired), None) => { + warn!("Wanted to negotiate codec {desired:?} but client didn't offer any alpns"); + // note(hayley): + // the envoy docs state that ALPN is preferred when it is available, but if it is not + // protocol inference is used if the codec is set to auto + // since we pass in the Codec from the listener here, not the alpn config, we should accept this. + None + }, + //nothing requested (i.e. tcp proxy), nothing offered + //nothing requested, client offered alpn. + (None, None | Some(_)) => None, + }; + let stream = accepted.into_stream(Arc::new(config)).await.map_err(|e| format!("Can't accept {e:?}"))?; + Ok((Box::new(stream), negotiated_codec_type)) + }, + Err(err) => Err(format!("{listener_name} Can't start tls {err:?}").into()), + } +} + +#[cfg(test)] +mod tests { + use orion_configuration::config::listener::{FilterChainMatch, MatchResult}; + use orion_data_plane_api::decode::from_yaml; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::listener::v3::FilterChainMatch as EnvoyFilterChainMatch; + use std::net::Ipv4Addr; + use tracing_test::traced_test; + + #[traced_test] + #[test] + fn filter_chain_match_empty_sni() { + let m: EnvoyFilterChainMatch = from_yaml( + r" + server_names: [] + ", + ) + .unwrap(); + let m: FilterChainMatch = m.try_into().unwrap(); + let dstport = 443; + let sourceip = Ipv4Addr::new(127, 0, 0, 1).into(); + let srcport = 33000; + assert_eq!(m.matches_destination_ip(sourceip), MatchResult::NoRule); + assert_eq!(m.matches_source_ip(sourceip), MatchResult::NoRule); + assert_eq!(m.matches_destination_port(dstport), MatchResult::NoRule); + assert_eq!(m.matches_source_port(srcport), MatchResult::NoRule); + assert_eq!(m.matches_server_name("host.test"), MatchResult::NoRule); + } + + #[traced_test] + #[test] + fn filter_chain_match_ip_prefix() { + let m: EnvoyFilterChainMatch = + from_yaml("prefix_ranges: [{address_prefix: 192.168.0.0, prefix_len: 24}]").unwrap(); + let m: FilterChainMatch = m.try_into().unwrap(); + assert_eq!(m.matches_destination_ip(Ipv4Addr::new(192, 168, 0, 1).into()), MatchResult::Matched(8)); + assert_eq!(m.matches_destination_ip(Ipv4Addr::new(192, 168, 0, 255).into()), MatchResult::Matched(8)); + assert_eq!(m.matches_destination_ip(Ipv4Addr::new(192, 168, 1, 1).into()), MatchResult::FailedMatch); + assert_eq!(m.matches_destination_ip(Ipv4Addr::new(172, 168, 0, 1).into()), MatchResult::FailedMatch); + assert_eq!(m.matches_source_ip(Ipv4Addr::new(192, 168, 0, 1).into()), MatchResult::NoRule); + } + + #[traced_test] + #[test] + fn filter_chain_wildcards() { + let m: EnvoyFilterChainMatch = from_yaml( + " + server_names: [host.test, \"*.wildcard\"] + destination_port: 443 + source_ports: [3300] + prefix_ranges: [{address_prefix: 127.0.0.1, prefix_len: 32}] + ", + ) + .unwrap(); + let m: FilterChainMatch = m.try_into().unwrap(); + + assert_eq!(m.matches_server_name("host.test"), MatchResult::Matched(0)); + 
assert_eq!(m.matches_server_name(""), MatchResult::FailedMatch); + + assert_eq!(m.matches_server_name("wildcard"), MatchResult::FailedMatch); + assert_eq!(m.matches_server_name("shost.test"), MatchResult::FailedMatch); + assert_eq!(m.matches_server_name("s.host.test"), MatchResult::FailedMatch); + assert_eq!(m.matches_server_name("notawildcard"), MatchResult::FailedMatch); + + assert_eq!(m.matches_server_name("a.wildcard"), MatchResult::Matched(1)); + assert_eq!(m.matches_server_name("1.a.wildcard"), MatchResult::Matched(2)); + assert_eq!(m.matches_server_name("*.wildcard"), MatchResult::Matched(1)); + } +} diff --git a/orion-lib/src/listeners/http_connection_manager.rs b/orion-lib/src/listeners/http_connection_manager.rs new file mode 100644 index 00000000..b7cccea2 --- /dev/null +++ b/orion-lib/src/listeners/http_connection_manager.rs @@ -0,0 +1,321 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +mod direct_response; +mod redirect; +mod route; +use route::MatchedRequest; + +use crate::{ + body::timeout_body::TimeoutBody, + listeners::{rate_limiter::LocalRateLimit, synthetic_http_response::SyntheticHttpResponse}, + ConversionContext, HttpBody, PolyBody, Result, RouteConfiguration, +}; +use compact_str::{CompactString, ToCompactString}; +use core::time::Duration; +use futures::{future::BoxFuture, FutureExt}; +use hyper::{body::Incoming, service::Service, Request, Response}; +use orion_configuration::config::network_filters::http_connection_manager::{ + http_filters::{http_rbac::HttpRbac, HttpFilter as HttpFilterConfig, HttpFilterType}, + route::Action, + CodecType, ConfigSource, ConfigSourceSpecifier, HttpConnectionManager as HttpConnectionManagerConfig, RdsSpecifier, + RouteSpecifier, +}; +use std::{ + fmt, + future::{ready, Future}, + net::SocketAddr, + result::Result as StdResult, + sync::Arc, +}; +use tokio::sync::watch; +use tracing::debug; + +#[derive(Debug, Clone)] +pub struct HttpConnectionManagerBuilder { + listener_name: Option, + connection_manager: PartialHttpConnectionManager, +} + +impl TryFrom> for HttpConnectionManagerBuilder { + type Error = crate::Error; + fn try_from(ctx: ConversionContext) -> Result { + let partial = PartialHttpConnectionManager::try_from(ctx)?; + Ok(Self { listener_name: None, connection_manager: partial }) + } +} + +impl HttpConnectionManagerBuilder { + pub fn build(self) -> Result { + let name = self.listener_name.ok_or("listener name is not set")?; + + let PartialHttpConnectionManager { router, codec_type, dynamic_route_name, http_filters, request_timeout } = + self.connection_manager; + + let router_sender = watch::Sender::new(router.map(Arc::new)); + + Ok(HttpConnectionManager { + listener_name: name, + router_sender, + codec_type, + dynamic_route_name, + http_filters, + request_timeout, + }) + } + + pub fn with_listener_name(self, name: 
CompactString) -> Self { + HttpConnectionManagerBuilder { listener_name: Some(name), ..self } + } +} + +#[derive(Debug, Clone)] +pub struct PartialHttpConnectionManager { + router: Option, + codec_type: CodecType, + dynamic_route_name: Option, + http_filters: Vec, + request_timeout: Option, +} + +#[derive(Debug, Clone)] +pub struct HttpFilter { + pub name: CompactString, + pub disabled: bool, + pub filter: HttpFilterValue, +} + +#[derive(Debug, Clone)] +pub enum HttpFilterValue { + RateLimit(LocalRateLimit), + Rbac(HttpRbac), +} + +impl From for HttpFilter { + fn from(value: HttpFilterConfig) -> Self { + let HttpFilterConfig { name, disabled, filter } = value; + let filter = match filter { + HttpFilterType::RateLimit(r) => HttpFilterValue::RateLimit(r.into()), + HttpFilterType::Rbac(rbac) => HttpFilterValue::Rbac(rbac), + }; + Self { name, disabled, filter } + } +} + +impl HttpFilterValue { + pub fn apply(&self, request: &Request) -> Option> { + match self { + HttpFilterValue::Rbac(rbac) => apply_authorization_rules(rbac, request), + HttpFilterValue::RateLimit(rl) => rl.run(request), + } + } +} + +impl TryFrom> for PartialHttpConnectionManager { + type Error = crate::Error; + fn try_from(ctx: ConversionContext) -> Result { + let ConversionContext { envoy_object: configuration, secret_manager: _ } = ctx; + let codec_type = configuration.codec_type; + + let http_filters = configuration.http_filters.into_iter().map(HttpFilter::from).collect(); + let request_timeout = configuration.request_timeout; + + let (dynamic_route_name, router) = match configuration.route_specifier { + RouteSpecifier::Rds(RdsSpecifier { + route_config_name, + config_source: ConfigSource { config_source_specifier }, + }) => match config_source_specifier { + ConfigSourceSpecifier::ADS => (Some(route_config_name.to_compact_string()), None), + }, + RouteSpecifier::RouteConfig(config) => (None, Some(config)), + }; + + Ok(PartialHttpConnectionManager { router, codec_type, dynamic_route_name, http_filters, request_timeout }) + } +} + +#[derive(Debug, Clone, Copy)] +pub enum AlpnCodecs { + Http1, + Http2, +} + +impl AsRef<[u8]> for AlpnCodecs { + fn as_ref(&self) -> &[u8] { + match self { + Self::Http2 => b"h2", + Self::Http1 => b"http/1.1", + } + } +} + +impl AlpnCodecs { + pub fn from_codec(codec: CodecType) -> &'static [Self] { + match codec { + CodecType::Auto => &[AlpnCodecs::Http2, AlpnCodecs::Http1], + CodecType::Http2 => &[AlpnCodecs::Http2], + CodecType::Http1 => &[AlpnCodecs::Http1], + } + } +} + +#[derive(Debug)] +pub struct HttpConnectionManager { + pub listener_name: CompactString, + router_sender: watch::Sender>>, + pub codec_type: CodecType, + dynamic_route_name: Option, + http_filters: Vec, + request_timeout: Option, +} + +impl fmt::Display for HttpConnectionManager { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "HttpConnectionManager {}", &self.listener_name,) + } +} + +impl HttpConnectionManager { + pub fn get_route_id(&self) -> &Option { + &self.dynamic_route_name + } + + pub fn update_route(&self, route: Arc) { + let _ = self.router_sender.send_replace(Some(route)); + } + + pub fn remove_route(&self) { + let _ = self.router_sender.send_replace(None); + } + + pub(crate) fn request_handler(self: &Arc) -> HttpRequestHandler { + HttpRequestHandler { manager: Arc::clone(self), router: self.router_sender.subscribe() } + } +} + +pub(crate) struct HttpRequestHandler { + manager: Arc, + router: watch::Receiver>>, +} + +pub struct HttpHandlerRequest { + pub request: Request, + pub source_addr: 
SocketAddr,
+}
+
+// has to be a trait due to foreign impl rules.
+pub trait RequestHandler<R> {
+    fn to_response(self, request: R) -> impl Future<Output = Result<Response<PolyBody>>> + Send;
+}
+
+impl RequestHandler<(Request<TimeoutBody<Incoming>>, SocketAddr)> for Arc<RouteConfiguration> {
+    async fn to_response(
+        self,
+        (request, source_address): (Request<TimeoutBody<Incoming>>, SocketAddr),
+    ) -> Result<Response<PolyBody>> {
+        // needs some way to resolve the request first, _then_ apply modifications on both request and response from there.
+        // might just want to implement this whole thing as a hyper service "layer"
+
+        let Some(chosen_vh) = self
+            .virtual_hosts
+            .iter()
+            .max_by_key(|vh| vh.domains.iter().map(|domain| domain.eval_lpm_request(&request)).max())
+        else {
+            return Ok(SyntheticHttpResponse::not_found().into_response(request.version()));
+        };
+        let Some((chosen_route, route_match_result)) = chosen_vh
+            .routes
+            .iter()
+            .map(|route| (route, route.route_match.match_request(&request)))
+            .find(|(_, match_result)| match_result.matched())
+        else {
+            return Ok(SyntheticHttpResponse::not_found().into_response(request.version()));
+        };
+
+        //todo(hayley)
+        // if let Some(response) = apply_filters(http_filters, &chosen_route.typed_per_filter_config, &request) {
+        //     return Ok(response);
+        // }
+        let mut response = match &chosen_route.action {
+            Action::DirectResponse(dr) => dr.to_response(request).await,
+            Action::Redirect(rd) => rd.to_response((request, route_match_result)).await,
+            Action::Route(route) => {
+                route
+                    .to_response(MatchedRequest {
+                        request,
+                        virtual_host: chosen_vh,
+                        route_match: route_match_result,
+                        source_address,
+                    })
+                    .await
+            },
+        }?;
+        let resp_headers = response.headers_mut();
+        if self.most_specific_header_mutations_wins {
+            self.response_header_modifier.modify(resp_headers);
+            chosen_vh.response_header_modifier.modify(resp_headers);
+            chosen_route.response_header_modifier.modify(resp_headers);
+        } else {
+            chosen_route.response_header_modifier.modify(resp_headers);
+            chosen_vh.response_header_modifier.modify(resp_headers);
+            self.response_header_modifier.modify(resp_headers);
+        }
+        Ok(response)
+    }
+}
+
+impl Service<HttpHandlerRequest> for HttpRequestHandler {
+    type Response = Response<PolyBody>;
+    type Error = crate::Error;
+    type Future = BoxFuture<'static, StdResult<Self::Response, Self::Error>>;
+
+    fn call(&self, request: HttpHandlerRequest) -> Self::Future {
+        let HttpHandlerRequest { request, source_addr } = request;
+
+        let (parts, body) = request.into_parts();
+        // optionally apply a timeout to the body.
+        // envoy says this timeout is started when the request is initiated. This is relatively vague, but because at this
+        // point we will already have the headers, it seems like a fair start.
+        // note that we can still time-out a request due to e.g. the filters taking a long time to compute, or the proxy
+        // being overwhelmed, not just due to the downstream being slow.
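+        // (TimeoutBody only bounds the gap between body frames. A total-duration cap, closer to
+        // Envoy's request_timeout semantics, would instead wrap the dispatch future below, e.g.
+        // tokio::time::timeout(dur, route_conf.to_response(..)), mapping the elapsed error onto
+        // SyntheticHttpResponse::gateway_timeout() - a sketch only; see the todo below.)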
+ // todo(hayley): this timeout is incorrect (checks for time between frames not total time), and doesn't seem to get converted into + // http response + let request = Request::from_parts(parts, TimeoutBody::new(self.manager.request_timeout, body)); + + // if let Some(response) = self.apply_filters(&request) { + // return ready(Ok(response)).boxed(); + // } + + let Some(route_conf) = self.router.borrow().clone() else { + return ready(Ok(SyntheticHttpResponse::not_found().into_response(request.version()))).boxed(); + }; + + route_conf.to_response((request, source_addr)).boxed() + } +} + +fn apply_authorization_rules(rbac: &HttpRbac, req: &Request) -> Option> { + debug!("Applying authorization rules {rbac:?} {:?}", &req.headers()); + if !rbac.is_permitted(req) { + Some(SyntheticHttpResponse::forbidden("RBAC: access denied").into_response(req.version())) + } else { + None + } +} diff --git a/orion-lib/src/listeners/http_connection_manager/direct_response.rs b/orion-lib/src/listeners/http_connection_manager/direct_response.rs new file mode 100644 index 00000000..e5dc83c7 --- /dev/null +++ b/orion-lib/src/listeners/http_connection_manager/direct_response.rs @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use super::RequestHandler; +use crate::{body::timeout_body::TimeoutBody, PolyBody, Result}; + +use http_body_util::Full; +use hyper::{body::Incoming, Request, Response}; +use orion_configuration::config::network_filters::http_connection_manager::route::DirectResponseAction; + +impl RequestHandler>> for &DirectResponseAction { + async fn to_response(self, request: Request>) -> Result> { + let body = Full::new(self.body.as_ref().map(|b| bytes::Bytes::copy_from_slice(b.data())).unwrap_or_default()); + let mut resp = Response::new(body.into()); + *resp.status_mut() = self.status; + *resp.version_mut() = request.version(); + Ok(resp) + } +} diff --git a/orion-lib/src/listeners/http_connection_manager/redirect.rs b/orion-lib/src/listeners/http_connection_manager/redirect.rs new file mode 100644 index 00000000..380739a0 --- /dev/null +++ b/orion-lib/src/listeners/http_connection_manager/redirect.rs @@ -0,0 +1,115 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use super::RequestHandler; +use crate::{body::timeout_body::TimeoutBody, Error, HttpBody, PolyBody, Result}; +use http::{ + header::LOCATION, + uri::{Authority, Parts as UriParts, PathAndQuery, Scheme}, + HeaderValue, StatusCode, Uri, +}; +use hyper::{body::Incoming, Request, Response}; +use orion_configuration::config::network_filters::http_connection_manager::route::{ + AuthorityRedirect, RedirectAction, RouteMatchResult, +}; +use orion_error::ResultExtension; +use std::str::FromStr; + +impl RequestHandler<(Request>, RouteMatchResult)> for &RedirectAction { + async fn to_response( + self, + (request, route_match_result): (Request>, RouteMatchResult), + ) -> Result> { + let (parts, _) = request.into_parts(); + let mut rsp = Response::builder().status(StatusCode::from(self.response_code)).version(parts.version); + + let UriParts { scheme: orig_scheme, authority: orig_authority, path_and_query: orig_path_and_query, .. } = + parts.uri.into_parts(); + let orig_host = orig_authority.as_ref().map(Authority::host); + let orig_port = orig_authority.as_ref().map(Authority::port_u16).flatten(); + let authority = match (self.authority_redirect.as_ref(), (orig_host, orig_port)) { + //no redirect + (None, _) => orig_authority, + //full authority redirect OR host redirect with no port in the original uri + (Some(AuthorityRedirect::AuthorityRedirect(a)), _) + | (Some(AuthorityRedirect::HostRedirect(a)), (_, None)) => Some(a.clone()), + (Some(AuthorityRedirect::HostRedirect(h)), (_, Some(port))) => { + if (orig_scheme == Some(Scheme::HTTP) && port == 80) + || (orig_scheme == Some(Scheme::HTTPS) && port == 443) + { + //strip port + Some(h.clone()) + } else { + let uri = format!("{h}:{port}"); + Some(Authority::from_str(&uri).context("invalid uri \"{uri}\"")?) + } + }, + // port redirect with a host in the original uri + (Some(AuthorityRedirect::PortRedirect(port)), (Some(h), _)) => { + let uri = format!("{h}:{port}"); + Some(Authority::from_str(&uri).context("invalid uri \"{uri}\"")?) + }, + // a port redirection with no known host + (Some(AuthorityRedirect::PortRedirect(_)), (None, _)) => { + return Err("tried to perform a port redirection with no host given".into()) + }, + }; + + // strip query if specified + let orig_path_and_query = if let Some(orig) = orig_path_and_query { + if orig.query().is_some() && self.strip_query { + Some(PathAndQuery::from_str(orig.path()).context("failed to strip query")?) + } else { + Some(orig) + } + } else { + None + }; + + let scheme = self.scheme_rewrite_specifier.clone().or(orig_scheme); + + // if this replacement yields a query, it will always overwrite the existing query + let path_and_query = if let Some(prs) = self.path_rewrite_specifier.as_ref() { + if let Some(replacement) = prs + .apply(orig_path_and_query.as_ref(), &route_match_result) + .context("invalid path or query following replacement")? 
+ { + Some(replacement) + } else { + orig_path_and_query + } + } else { + orig_path_and_query + }; + + let new_uri = Uri::from_parts({ + let mut parts = UriParts::default(); + parts.authority = authority; + parts.scheme = scheme; + parts.path_and_query = path_and_query; + parts + }) + .context("failed to reconstruct uri after applying redirect params")?; + let redirect_target = + HeaderValue::from_str(&new_uri.to_string()).context("couldn't convert uri to headervalue")?; + rsp.headers_mut().and_then(|hm| hm.insert(LOCATION, redirect_target)); + rsp.body(HttpBody::default()).map_err(Error::from) + } +} diff --git a/orion-lib/src/listeners/http_connection_manager/route.rs b/orion-lib/src/listeners/http_connection_manager/route.rs new file mode 100644 index 00000000..42187fb6 --- /dev/null +++ b/orion-lib/src/listeners/http_connection_manager/route.rs @@ -0,0 +1,97 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use super::RequestHandler; +use crate::{ + body::timeout_body::TimeoutBody, + clusters::{balancers::hash_policy::HashState, clusters_manager}, + listeners::synthetic_http_response::SyntheticHttpResponse, + transport::request_context::{RequestContext, RequestWithContext}, + PolyBody, Result, +}; +use http::{uri::Parts as UriParts, Uri}; +use hyper::{body::Incoming, Request, Response}; +use orion_configuration::config::network_filters::http_connection_manager::{ + route::{RouteAction, RouteMatchResult}, + VirtualHost, +}; +use orion_error::ResultExtension; +use std::net::SocketAddr; +use tracing::debug; + +pub struct MatchedRequest<'a> { + pub request: Request>, + pub virtual_host: &'a VirtualHost, + pub route_match: RouteMatchResult, + pub source_address: SocketAddr, +} + +impl<'a> RequestHandler> for &RouteAction { + async fn to_response(self, request: MatchedRequest<'a>) -> Result> { + let MatchedRequest { request, virtual_host, route_match, source_address } = request; + let retry_policy = self.retry_policy.as_ref().or(virtual_host.retry_policy.as_ref()); + //todo(hayley): the envoy docs say + // > The router filter will place the original path before rewrite into the x-envoy-original-path header. + + let hash_state = HashState::new(&self.hash_policy, &request, source_address); + let maybe_channel = clusters_manager::get_http_connection(&self.cluster_specifier, hash_state); + match maybe_channel { + Ok(svc_channel) => { + let ver = request.version(); + let request: Request = { + let (mut parts, body) = request.into_parts(); + let path_and_query_replacement = if let Some(rewrite) = &self.rewrite { + rewrite.apply(parts.uri.path_and_query(), &route_match).context("invalid path after rewrite")? + } else { + None + }; + if path_and_query_replacement.is_some() { + parts.uri = { + let UriParts { scheme, authority, path_and_query: _, .. 
} = parts.uri.into_parts(); + let mut new_parts = UriParts::default(); + new_parts.scheme = scheme; + new_parts.authority = authority; + new_parts.path_and_query = path_and_query_replacement; + Uri::from_parts(new_parts).context("failed to replace request path_and_query")? + } + } + Request::from_parts(parts, body.into()) + }; + match svc_channel + .to_response(RequestWithContext::with_context( + request, + RequestContext { route_timeout: self.timeout, retry_policy }, + )) + .await + { + Err(err) => { + debug!("HttpConnectionManager Error processing response {:?}", err); + Ok(SyntheticHttpResponse::bad_gateway().into_response(ver)) + }, + Ok(resp) => Ok(resp), + } + }, + Err(err) => { + debug!("Failed to get an HTTP connection: {:?}", err); + Ok(SyntheticHttpResponse::internal_error().into_response(request.version())) + }, + } + } +} diff --git a/orion-lib/src/listeners/listener.rs b/orion-lib/src/listeners/listener.rs new file mode 100644 index 00000000..b712242f --- /dev/null +++ b/orion-lib/src/listeners/listener.rs @@ -0,0 +1,623 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// + +use super::{ + filterchain::{ConnectionHandler, FilterchainBuilder, FilterchainType}, + listeners_manager::TlsContextChange, +}; +use crate::{ + secrets::{TlsConfigurator, WantsToBuildServer}, + transport::{bind_device::BindDevice, tls_inspector::TlsInspector}, + ConversionContext, Error, Result, RouteConfigurationChange, +}; +use compact_str::{CompactString, ToCompactString}; +use orion_configuration::config::listener::{FilterChainMatch, Listener as ListenerConfig, MatchResult}; +use rustls::ServerConfig; +use std::{collections::HashMap, fmt::Debug, net::SocketAddr, sync::Arc}; +use tokio::{ + net::{TcpListener, TcpSocket}, + sync::broadcast::{self}, +}; +use tracing::{debug, info, warn}; + +#[derive(Debug, Clone)] +struct PartialListener { + name: CompactString, + socket_address: std::net::SocketAddr, + bind_device: Option, + filter_chains: HashMap, + with_tls_inspector: bool, +} +#[derive(Debug, Clone)] +pub struct ListenerFactory { + listener: PartialListener, +} + +impl TryFrom> for PartialListener { + type Error = Error; + fn try_from(ctx: ConversionContext<'_, ListenerConfig>) -> std::result::Result { + let ConversionContext { envoy_object: listener, secret_manager } = ctx; + let name = listener.name.to_compact_string(); + let addr = listener.address; + let with_tls_inspector = listener.with_tls_inspector; + debug!("Listener {name} :TLS Inspector is {with_tls_inspector}"); + let filter_chains: HashMap = listener + .filter_chains + .into_iter() + .map(|f| FilterchainBuilder::try_from(ConversionContext::new((f.1, secret_manager))).map(|x| (f.0, x))) + .collect::>()?; + let bind_device = listener.bind_device; + + if !with_tls_inspector { + let has_server_names = filter_chains.keys().any(|m| !m.server_names.is_empty()); + if has_server_names { + return Err((format!( + "Listener '{name}' has server_names in filter_chain_match, but no TLS inspector so matches would always fail" + )).into()); + } + } + + Ok(PartialListener { name, socket_address: addr, bind_device, filter_chains, with_tls_inspector }) + } +} + +impl ListenerFactory { + pub fn make_listener( + self, + route_updates_receiver: broadcast::Receiver, + secret_updates_receiver: broadcast::Receiver, + ) -> Result { + let PartialListener { name, socket_address, bind_device, filter_chains, with_tls_inspector } = self.listener; + + let filter_chains = filter_chains + .into_iter() + .map(|fc| fc.1.with_listener_name(name.clone()).build().map(|x| (fc.0, x))) + .collect::>>()?; + + Ok(Listener { + name, + socket_address, + bind_device, + filter_chains, + with_tls_inspector, + route_updates_receiver, + secret_updates_receiver, + }) + } +} + +impl TryFrom> for ListenerFactory { + type Error = Error; + fn try_from(ctx: ConversionContext<'_, ListenerConfig>) -> std::result::Result { + let listener = PartialListener::try_from(ctx)?; + Ok(Self { listener }) + } +} + +#[derive(Debug)] +pub struct Listener { + name: CompactString, + socket_address: std::net::SocketAddr, + bind_device: Option, + pub filter_chains: HashMap, + with_tls_inspector: bool, + route_updates_receiver: broadcast::Receiver, + secret_updates_receiver: broadcast::Receiver, +} + +impl Listener { + #[cfg(test)] + pub(crate) fn test_listener( + name: &str, + route_rx: broadcast::Receiver, + secret_rx: broadcast::Receiver, + ) -> Self { + use std::net::{IpAddr, Ipv4Addr}; + Listener { + name: name.into(), + socket_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0), + bind_device: None, + filter_chains: HashMap::new(), + with_tls_inspector: false, + 
            route_updates_receiver: route_rx,
+            secret_updates_receiver: secret_rx,
+        }
+    }
+
+    pub fn get_name(&self) -> &CompactString {
+        &self.name
+    }
+    pub fn get_socket(&self) -> (&std::net::SocketAddr, Option<&BindDevice>) {
+        (&self.socket_address, self.bind_device.as_ref())
+    }
+
+    pub async fn start(self) -> Error {
+        let Self {
+            name,
+            socket_address: local_address,
+            bind_device,
+            filter_chains,
+            with_tls_inspector,
+            mut route_updates_receiver,
+            mut secret_updates_receiver,
+        } = self;
+        let listener = match configure_and_start_tcp_listener(local_address, bind_device.as_ref()) {
+            Ok(x) => x,
+            Err(e) => return e,
+        };
+        info!("listener '{name}' started: {local_address}");
+        let mut filter_chains = Arc::new(filter_chains);
+        loop {
+            tokio::select! {
+                // here we accept a connection, and then start processing it.
+                // we spawn early so that we don't block other connections from being accepted due to a slow client
+                maybe_stream = listener.accept() => {
+                    match maybe_stream {
+                        Ok((stream, peer_addr)) => {
+                            let filter_chains = Arc::clone(&filter_chains);
+                            let name = name.clone();
+                            // spawn a separate task for handling this client<->proxy connection.
+                            // we spawn before we know if we want to process this route because we might need to run the tls_inspector, which could
+                            // stall if the client is slow to send the ClientHello and end up blocking the acceptance of new connections
+                            //
+                            // we could optimize a little here by either splitting up the filter_chain selection and rbac into the parts that can run
+                            // before we have the ClientHello and the ones after, since we might already have enough info to decide to drop the connection
+                            // or pick a specific filter_chain to run, or we could simply if-else on the with_tls_inspector variable.
+                            tokio::spawn(Self::process_listener_update(name, filter_chains, with_tls_inspector, local_address, peer_addr, stream));
+                        },
+                        Err(e) => {warn!("failed to accept tcp connection: {e}");}
+                    }
+                },
+                maybe_route_update = route_updates_receiver.recv() => {
+                    //todo: add context to the error here once orion-error lands
+                    match maybe_route_update {
+                        Ok(route_update) => {Self::process_route_update(&name, &filter_chains, route_update);}
+                        Err(e) => {return e.into();}
+                    }
+                },
+                maybe_secret_update = secret_updates_receiver.recv() => {
+                    match maybe_secret_update {
+                        Ok(secret_update) => {
+                            // todo: possibly expensive clone - may need to rethink this structure
+                            let mut filter_chains_clone = filter_chains.as_ref().clone();
+                            Self::process_secret_update(&name, &mut filter_chains_clone, secret_update);
+                            filter_chains = Arc::new(filter_chains_clone);
+                        }
+                        Err(e) => {return e.into();}
+                    }
+                }
+            }
+        }
+    }
+
+    fn select_filterchain<'a, T>(
+        filter_chains: &'a HashMap<FilterChainMatch, T>,
+        source_addr: SocketAddr,
+        destination_addr: SocketAddr,
+        server_name: Option<&str>,
+    ) -> Result<Option<&'a T>> {
+        //todo: smallvec? other optimization?
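+        // A sketch of the elimination strategy, for orientation: every chain starts as a candidate,
+        // and for each criterion in turn (destination port, destination IP, server name, source IP,
+        // source port) all surviving candidates are scored; chains that fail the criterion, or score
+        // below the most specific match, are eliminated before the next criterion is considered.
+        // Whatever survives all five rounds wins; more than one survivor is reported as a bug.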
+        let mut possible_filters = vec![true; filter_chains.len()];
+        let mut scratchpad = vec![MatchResult::NoRule; filter_chains.len()];
+
+        fn match_subitem<'a, F: Fn(&FilterChainMatch, T) -> MatchResult, T: Copy>(
+            function: F,
+            comparand: T,
+            iter: impl Iterator<Item = &'a FilterChainMatch>,
+            scratchpad: &mut [MatchResult],
+            possible_filters: &mut [bool],
+        ) {
+            let mut best_match = MatchResult::FailedMatch;
+            // check all filters still in the running, skipping over those already eliminated
+            for (i, match_config) in iter.enumerate().filter(|(i, _)| possible_filters[*i]) {
+                let match_result = function(match_config, comparand);
+                // mark the outcome of this iteration, and keep track of the best result
+                scratchpad[i] = match_result;
+                if match_result > best_match {
+                    best_match = match_result;
+                }
+            }
+            // now trim all the results that failed to match, or were less specific than the best match
+            for i in 0..scratchpad.len() {
+                if scratchpad[i] != best_match || scratchpad[i] == MatchResult::FailedMatch {
+                    possible_filters[i] = false;
+                }
+            }
+        }
+
+        match_subitem(
+            FilterChainMatch::matches_destination_port,
+            destination_addr.port(),
+            filter_chains.keys(),
+            &mut scratchpad,
+            &mut possible_filters,
+        );
+
+        match_subitem(
+            FilterChainMatch::matches_destination_ip,
+            destination_addr.ip(),
+            filter_chains.keys(),
+            &mut scratchpad,
+            &mut possible_filters,
+        );
+
+        match_subitem(
+            FilterChainMatch::matches_server_name,
+            server_name.unwrap_or_default(),
+            filter_chains.keys(),
+            &mut scratchpad,
+            &mut possible_filters,
+        );
+
+        match_subitem(
+            FilterChainMatch::matches_source_ip,
+            source_addr.ip(),
+            filter_chains.keys(),
+            &mut scratchpad,
+            &mut possible_filters,
+        );
+
+        match_subitem(
+            FilterChainMatch::matches_source_port,
+            source_addr.port(),
+            filter_chains.keys(),
+            &mut scratchpad,
+            &mut possible_filters,
+        );
+
+        let mut possible_filters = possible_filters
+            .into_iter()
+            .zip(filter_chains.iter())
+            .filter_map(|(include, item)| include.then_some(item.1));
+
+        let first_match = possible_filters.next();
+        if possible_filters.next().is_some() {
+            Err("multiple filterchains matched a single connection. This is a bug in orion!".into())
+        } else {
+            Ok(first_match)
+        }
+    }
+
+    async fn process_listener_update(
+        listener_name: CompactString,
+        filter_chains: Arc<HashMap<FilterChainMatch, FilterchainType>>,
+        with_tls_inspector: bool,
+        local_address: SocketAddr,
+        peer_addr: SocketAddr,
+        mut stream: tokio::net::TcpStream,
+    ) -> Result<()> {
+        let server_name = if with_tls_inspector {
+            let sni = TlsInspector::peek_sni(&mut stream).await;
+            if let Some(sni) = sni.as_ref() {
+                debug!("{listener_name} : Detected TLS server name: {sni}");
+            } else {
+                debug!("{listener_name} : No TLS server name detected");
+            }
+            sni
+        } else {
+            None
+        };
+
+        let selected_filterchain =
+            Self::select_filterchain(&filter_chains, peer_addr, local_address, server_name.as_deref())?;
+        if let Some(filterchain) = selected_filterchain {
+            debug!(
+                "{listener_name} : mapping connection from {peer_addr} to filter chain {}",
+                filterchain.filter_chain().name
+            );
+            if let Some(stream) = filterchain.apply_rbac(stream, local_address, peer_addr, server_name.as_deref()) {
+                return filterchain.start_filterchain(stream).await;
+            } else {
+                debug!("{listener_name} : dropped connection from {peer_addr} due to rbac");
+            }
+        } else {
+            warn!("{listener_name} : No match for {peer_addr} {local_address}");
+        }
+        Ok(())
+    }
+
+    //could secrets and routes also be updated through a CachedWatch?
+    // they only need to be updated when they're read after all and could work with
+    fn process_secret_update(
+        listener_name: &str,
+        filter_chains: &mut HashMap<FilterChainMatch, FilterchainType>,
+        secret_update: TlsContextChange,
+    ) {
+        match secret_update {
+            TlsContextChange::Updated((secret_id, secret)) => {
+                for chain in filter_chains.values_mut() {
+                    let filterchain = &mut chain.config;
+                    if let Some(tls_configurator) = filterchain.tls_configurator.clone() {
+                        let maybe_configurator = TlsConfigurator::<ServerConfig, WantsToBuildServer>::update(
+                            tls_configurator,
+                            &secret_id,
+                            secret.clone(),
+                        );
+                        if let Ok(new_tls_configurator) = maybe_configurator {
+                            filterchain.tls_configurator = Some(new_tls_configurator);
+                        } else {
+                            let msg = format!(
+                                "{listener_name} Couldn't update a secret for filterchain {} {:?}",
+                                filterchain.name,
+                                maybe_configurator.err()
+                            );
+                            warn!("{msg}");
+                        }
+                    }
+                }
+            },
+        }
+    }
+
+    fn process_route_update(
+        listener_name: &str,
+        filter_chains: &HashMap<FilterChainMatch, FilterchainType>,
+        route_update: RouteConfigurationChange,
+    ) {
+        match route_update {
+            RouteConfigurationChange::Added((id, route)) => {
+                for chain in filter_chains.values() {
+                    if let ConnectionHandler::Http(http_manager) = &chain.handler {
+                        let route_id = http_manager.get_route_id();
+                        if let Some(route_id) = route_id {
+                            if route_id == id {
+                                debug!("{listener_name} Route updated {id} {route:?}");
+                                http_manager.update_route(Arc::new(route.clone()));
+                            }
+                        } else {
+                            debug!("{listener_name} Got route update {id} but filter chain has no dynamic (RDS) route configured");
+                        }
+                    }
+                }
+            },
+            RouteConfigurationChange::Removed(id) => {
+                for chain in filter_chains.values() {
+                    if let ConnectionHandler::Http(http_manager) = &chain.handler {
+                        if let Some(route_id) = http_manager.get_route_id() {
+                            if route_id == id {
+                                http_manager.remove_route();
+                            }
+                        }
+                    }
+                }
+            },
+        }
+    }
+}
+
+fn configure_and_start_tcp_listener(addr: SocketAddr, device: Option<&BindDevice>) -> Result<TcpListener> {
+    let socket = if addr.is_ipv4() { TcpSocket::new_v4()? } else { TcpSocket::new_v6()? };
+    socket.set_reuseaddr(true)?;
+    socket.set_keepalive(true)?;
+
+    if let Some(device) = device {
+        crate::transport::bind_device::bind_device(&socket, device)?;
+    }
+
+    #[cfg(all(unix, not(target_os = "solaris"), not(target_os = "illumos")))]
+    socket.set_reuseport(true)?;
+    socket.bind(addr)?;
+
+    Ok(socket.listen(128)?)
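+    // (128 is a conventional accept-backlog size; with SO_REUSEPORT set above, multiple runtimes
+    // can bind the same address on Linux and the kernel spreads accepted connections across them.)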
+} + +#[cfg(test)] +mod tests { + use orion_configuration::config::listener::{FilterChainMatch as FilterChainMatchConfig, ServerNameMatch}; + use orion_data_plane_api::decode::from_yaml; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::listener::v3::FilterChainMatch as EnvoyFilterChainMatch; + + use crate::SecretManager; + + use super::*; + use orion_data_plane_api::envoy_data_plane_api::envoy::config::listener::v3::Listener as EnvoyListener; + + use std::net::Ipv4Addr; + use std::str::FromStr; + use tracing_test::traced_test; + + #[test] + fn listener_bind_device() { + const LISTENER: &str = r#" +name: listener_https +address: + socket_address: { address: 0.0.0.0, port_value: 8443 } +filter_chains: + - name: filter_chain + filters: + - name: https_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: HTTP1 + stat_prefix: http + httpFilters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + route_config: + name: basic_https_route + virtual_hosts: + - name: backend_https + domains: ["*"] +socket_options: + - description: "bind to interface virt1" + level: 1 + name: 25 + # utf8 string 'virt1' bytes encoded as base64 + buf_value: dmlydDE= +"#; + + let envoy_listener: EnvoyListener = from_yaml(LISTENER).unwrap(); + let listener = envoy_listener.try_into().unwrap(); + let secrets_manager = SecretManager::new(); + let ctx = ConversionContext::new((listener, &secrets_manager)); + let l = PartialListener::try_from(ctx).unwrap(); + let expected_bind_device = Some(BindDevice::from_str("virt1").unwrap()); + + assert_eq!(&l.bind_device, &expected_bind_device); + } + + #[test] + fn match_fallback_sni() { + let fcm = [ + ( + FilterChainMatch { + destination_port: None, + destination_prefix_ranges: Vec::new(), + server_names: vec![ + ServerNameMatch::from_str("host1.test").unwrap(), + ServerNameMatch::from_str("host2.test").unwrap(), + ], + source_prefix_ranges: Vec::new(), + source_ports: Vec::new(), + }, + 0, + ), + (FilterChainMatch::default(), 1), + ]; + let hashmap: HashMap<_, _> = fcm.iter().cloned().collect(); + let srcaddr = (Ipv4Addr::new(127, 0, 0, 1), 33000).into(); + let selected = + Listener::select_filterchain(&hashmap, srcaddr, (Ipv4Addr::LOCALHOST, 8443).into(), None).unwrap(); + assert_eq!(selected.copied(), Some(1)); + } + + #[traced_test] + #[test] + fn sni_match_without_inspector_fails() { + const LISTENER: &str = r#" +name: listener_https +address: + socket_address: { address: 0.0.0.0, port_value: 8443 } +filter_chains: + - name: filter_chain_https1 + filter_chain_match: + server_names: [hostname.example] + filters: + - name: https_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: HTTP1 + stat_prefix: http + httpFilters: + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + route_config: + name: basic_https_route + virtual_hosts: + - name: backend_https + domains: ["*"] +"#; + + let envoy_listener: EnvoyListener = from_yaml(LISTENER).unwrap(); + let listener = envoy_listener.try_into().unwrap(); + let secrets_man = SecretManager::new(); + + let conv = ConversionContext { envoy_object: listener, secret_manager: &secrets_man }; + let r = PartialListener::try_from(conv); 
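+        // conversion must fail eagerly here: SNI matching without a TLS inspector could never succeed at runtime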
+ let err = r.unwrap_err(); + assert!(err + .to_string() + .contains("has server_names in filter_chain_match, but no TLS inspector so matches would always fail")); + } + + #[traced_test] + #[test] + fn filter_chain_multiple() { + let m: EnvoyFilterChainMatch = from_yaml( + " + server_names: [host.test, \"*.wildcard\"] + destination_port: 443 + source_ports: [3300] + prefix_ranges: [{address_prefix: 127.0.0.1, prefix_len: 32}] + ", + ) + .unwrap(); + let m: HashMap = std::iter::once((m.try_into().unwrap(), ())).collect(); + let good_source = (Ipv4Addr::LOCALHOST, 3300).into(); + let good_destination = (Ipv4Addr::LOCALHOST, 443).into(); + let good_host = Some("host.test"); + assert!(matches!(Listener::select_filterchain(&m, good_source, good_destination, good_host), Ok(Some(_)))); + assert!(matches!( + Listener::select_filterchain(&m, good_source, good_destination, Some("a.wildcard")), + Ok(Some(_)) + )); + assert!(matches!(Listener::select_filterchain(&m, good_source, good_destination, None), Ok(None))); + assert!(matches!( + Listener::select_filterchain(&m, good_source, (Ipv4Addr::LOCALHOST, 444).into(), good_host), + Ok(None) + )); + } + + #[test] + fn most_specific_wins() { + let l: EnvoyListener = from_yaml( + " + name: listener + filter_chains: + - filter_chain_match: + server_names: [this.is.more.specific] + - filter_chain_match: + server_names: [\"*.more.specific\"] + - filter_chain_match: + server_names: [\"*.specific\"] + - filter_chain_match: + server_names: [] + ", + ) + .unwrap(); + // let listener : Listener = l.try_into().unwrap(); + let m = l + .filter_chains + .into_iter() + .enumerate() + .map(|(i, fc)| { + fc.filter_chain_match + .map(FilterChainMatchConfig::try_from) + .transpose() + .map(|x| (x.unwrap_or_default(), i)) + }) + .collect::, _>>() + .unwrap(); + let srcaddr = (Ipv4Addr::new(127, 0, 0, 1), 33000).into(); + let dst = (Ipv4Addr::LOCALHOST, 8443).into(); + assert_eq!(Listener::select_filterchain(&m, srcaddr, dst, None).unwrap().copied(), Some(3)); + assert_eq!( + Listener::select_filterchain(&m, srcaddr, dst, Some("this.is.more.specific")).unwrap().copied(), + Some(0) + ); + assert_eq!( + Listener::select_filterchain(&m, srcaddr, dst, Some("not.this.is.more.specific")).unwrap().copied(), + Some(1) + ); + assert_eq!(Listener::select_filterchain(&m, srcaddr, dst, Some("is.more.specific")).unwrap().copied(), Some(1)); + + assert_eq!(Listener::select_filterchain(&m, srcaddr, dst, Some("more.specific")).unwrap().copied(), Some(2)); + assert_eq!( + Listener::select_filterchain(&m, srcaddr, dst, Some("this.is.less.specific")).unwrap().copied(), + Some(2) + ); + + assert_eq!(Listener::select_filterchain(&m, srcaddr, dst, Some("hello.world")).unwrap().copied(), Some(3)); + } +} diff --git a/orion-lib/src/listeners/listeners_manager.rs b/orion-lib/src/listeners/listeners_manager.rs new file mode 100644 index 00000000..42b45bef --- /dev/null +++ b/orion-lib/src/listeners/listeners_manager.rs @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::collections::BTreeMap; + +use compact_str::CompactString; +use tokio::sync::{broadcast, mpsc}; +use tracing::{debug, info, warn}; + +use orion_configuration::config::network_filters::http_connection_manager::RouteConfiguration; + +use super::listener::{Listener, ListenerFactory}; +use crate::{secrets::TransportSecret, Result}; +#[derive(Debug, Clone)] +pub enum ListenerConfigurationChange { + Added(ListenerFactory), + Removed(String), + TlsContextChanged((String, TransportSecret)), +} + +#[derive(Debug, Clone)] +pub enum RouteConfigurationChange { + Added((String, RouteConfiguration)), + Removed(String), +} +#[derive(Debug, Clone)] +pub enum TlsContextChange { + Updated((String, TransportSecret)), +} + +struct ListenerInfo { + handle: abort_on_drop::ChildTask<()>, +} +impl ListenerInfo { + fn new(handle: tokio::task::JoinHandle<()>) -> Self { + Self { handle: handle.into() } + } +} + +pub struct ListenersManager { + configuration_channel: mpsc::Receiver, + route_configuration_channel: mpsc::Receiver, + listener_handles: BTreeMap, +} + +impl ListenersManager { + pub fn new( + configuration_channel: mpsc::Receiver, + route_configuration_channel: mpsc::Receiver, + ) -> Self { + ListenersManager { configuration_channel, route_configuration_channel, listener_handles: BTreeMap::new() } + } + + pub async fn start(mut self) -> Result<()> { + let (tx_secret_updates, _) = broadcast::channel(16); + let (tx_route_updates, _) = broadcast::channel(16); + + loop { + tokio::select! 
{
+                Some(listener_configuration_change) = self.configuration_channel.recv() => {
+                    match listener_configuration_change {
+                        ListenerConfigurationChange::Added(factory) => {
+                            let listener = factory
+                                .make_listener(tx_route_updates.subscribe(), tx_secret_updates.subscribe())?;
+                            if let Err(e) = self.start_listener(listener) {
+                                warn!("Failed to start listener: {e}");
+                            }
+                        }
+                        ListenerConfigurationChange::Removed(listener_name) => {
+                            let _ = self.stop_listener(&listener_name);
+                        },
+                        ListenerConfigurationChange::TlsContextChanged((secret_id, secret)) => {
+                            info!("Got tls secret update {secret_id}");
+                            let res = tx_secret_updates.send(TlsContextChange::Updated((secret_id, secret)));
+                            if let Err(e) = res {
+                                warn!("Internal problem when updating a secret: {e}");
+                            }
+                        },
+                    }
+                },
+                Some(route_configuration_change) = self.route_configuration_channel.recv() => {
+                    // routes could be CachedWatch instead, as they are evaluated lazily
+                    let res = tx_route_updates.send(route_configuration_change);
+                    if let Err(e) = res {
+                        warn!("Internal problem when updating a route: {e}");
+                    }
+                },
+                else => {
+                    warn!("All listener manager channels are closed... exiting");
+                    return Err("All listener manager channels are closed... exiting".into());
+                }
+            }
+        }
+    }
+
+    pub fn start_listener(&mut self, listener: Listener) -> Result<()> {
+        let listener_name = listener.get_name().clone();
+        let (addr, dev) = listener.get_socket();
+        info!("Listener {} at {addr} (device bind:{})", listener_name, dev.is_some());
+        let listener_name_co = listener_name.clone();
+        // spawn the task for this listener address; this will spawn additional tasks per connection
+        let join_handle = tokio::spawn(async move {
+            let error = listener.start().await;
+            warn!("Listener {listener_name_co} exited: {error}");
+        });
+        #[cfg(debug_assertions)]
+        if self.listener_handles.contains_key(&listener_name) {
+            debug!("Listener {listener_name} already exists, replacing it");
+        }
+        // note: join handle gets overwritten here if it already exists.
+        // handles are abort-on-drop so the old one will be aborted, closing the socket,
+        // but any tasks spawned within this task (which happens on a per-connection basis)
+        // will survive past this point and only get dropped when their session ends
+        self.listener_handles.insert(listener_name, ListenerInfo::new(join_handle));
+
+        Ok(())
+    }
+
+    pub fn stop_listener(&mut self, listener_name: &str) -> Result<()> {
+        if let Some(abort_handler) = self.listener_handles.remove(listener_name) {
+            info!("{listener_name} : Stopped");
+            abort_handler.handle.abort();
+        }
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tracing_test::traced_test;
+
+    #[traced_test]
+    #[tokio::test]
+    async fn start_listener_dup() {
+        let chan = 10;
+        let name = "testlistener";
+
+        let (_conf_tx, conf_rx) = mpsc::channel(chan);
+        let (_route_tx, route_rx) = mpsc::channel(chan);
+        let mut man = ListenersManager::new(conf_rx, route_rx);
+
+        let (routeb_tx1, routeb_rx) = broadcast::channel(chan);
+        let (_secb_tx1, secb_rx) = broadcast::channel(chan);
+        let l1 = Listener::test_listener(name, routeb_rx, secb_rx);
+        man.start_listener(l1).unwrap();
+        assert!(routeb_tx1.send(RouteConfigurationChange::Removed("n/a".into())).is_ok());
+        tokio::task::yield_now().await;
+
+        let (routeb_tx2, routeb_rx) = broadcast::channel(chan);
+        let (_secb_tx2, secb_rx) = broadcast::channel(chan);
+        let l2 = Listener::test_listener(name, routeb_rx, secb_rx);
+        man.start_listener(l2).unwrap();
+        assert!(routeb_tx2.send(RouteConfigurationChange::Removed("n/a".into())).is_ok());
+        tokio::task::yield_now().await;
+
+        // This should fail because the old listener already exited, dropping the rx
+        assert!(routeb_tx1.send(RouteConfigurationChange::Removed("n/a".into())).is_err());
+        // Yield once more just in case more logs can be seen
+        tokio::task::yield_now().await;
+    }
+
+    #[traced_test]
+    #[tokio::test]
+    async fn start_listener_shutdown() {
+        let chan = 10;
+        let name = "my-listener";
+
+        let (_conf_tx, conf_rx) = mpsc::channel(chan);
+        let (_route_tx, route_rx) = mpsc::channel(chan);
+        let mut man = ListenersManager::new(conf_rx, route_rx);
+
+        let (routeb_tx1, routeb_rx) = broadcast::channel(chan);
+        let (secb_tx1, secb_rx) = broadcast::channel(chan);
+        let l1 = Listener::test_listener(name, routeb_rx, secb_rx);
+        man.start_listener(l1).unwrap();
+
+        drop(routeb_tx1);
+        drop(secb_tx1);
+        tokio::task::yield_now().await;
+
+        // See .start_listener() - in the case all channels are dropped the task there
+        // should exit with this warning msg
+        let expected = format!("Listener {name} exited: channel closed");
+        logs_assert(|lines: &[&str]| {
+            let logs: Vec<_> = lines.iter().filter(|ln| ln.contains(&expected)).collect();
+            if logs.len() == 1 {
+                Ok(())
+            } else {
+                Err(format!("Expecting 1 log line for listener shutdown (got {})", logs.len()))
+            }
+        });
+    }
+}
diff --git a/orion-lib/src/listeners/mod.rs b/orion-lib/src/listeners/mod.rs
new file mode 100644
index 00000000..37516e07
--- /dev/null
+++ b/orion-lib/src/listeners/mod.rs
@@ -0,0 +1,27 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +pub(crate) mod filterchain; +pub(crate) mod http_connection_manager; +pub(crate) mod listener; +pub(crate) mod listeners_manager; +pub(crate) mod rate_limiter; +pub(crate) mod synthetic_http_response; +pub(crate) mod tcp_proxy; diff --git a/orion-lib/src/listeners/rate_limiter/mod.rs b/orion-lib/src/listeners/rate_limiter/mod.rs new file mode 100644 index 00000000..c5fe5776 --- /dev/null +++ b/orion-lib/src/listeners/rate_limiter/mod.rs @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +mod token_bucket; + +use std::sync::Arc; + +use http::status::StatusCode; +use http::Request; +use hyper::Response; + +use token_bucket::TokenBucket; + +use orion_configuration::config::network_filters::http_connection_manager::http_filters::local_rate_limit::LocalRateLimit as LocalRateLimitConfig; + +use crate::listeners::synthetic_http_response::SyntheticHttpResponse; +use crate::{runtime_config, HttpBody}; + +#[derive(Debug, Clone)] +pub struct LocalRateLimit { + pub status: StatusCode, + pub token_bucket: Arc, +} + +impl LocalRateLimit { + pub fn run(&self, req: &Request) -> Option> { + if !self.token_bucket.consume(1) { + let status = self.status; + return Some(SyntheticHttpResponse::custom_error(status).into_response(req.version())); + } + None + } +} + +impl From for LocalRateLimit { + fn from(rate_limit: LocalRateLimitConfig) -> Self { + let status = rate_limit.status; + let max_tokens = rate_limit.max_tokens; + let tokens_per_fill = rate_limit.tokens_per_fill; + let fill_interval = rate_limit.fill_interval; + let token_bucket = Arc::new(TokenBucket::new( + max_tokens, + tokens_per_fill, + fill_interval.checked_mul(runtime_config().num_runtimes.into()).expect("too many runtimes (overflow)"), + )); + + Self { status, token_bucket } + } +} diff --git a/orion-lib/src/listeners/rate_limiter/token_bucket.rs b/orion-lib/src/listeners/rate_limiter/token_bucket.rs new file mode 100644 index 00000000..45c5cba9 --- /dev/null +++ b/orion-lib/src/listeners/rate_limiter/token_bucket.rs @@ -0,0 +1,259 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::hash::Hash;
+use std::sync::atomic::Ordering;
+
+use atomic_time::AtomicInstant;
+use std::fmt::Debug;
+use std::time::{Duration, Instant};
+
+/// This TokenBucket implementation takes inspiration from ``
+pub struct TokenBucket {
+    time: AtomicInstant,
+    time_per_token: Duration,
+    time_per_bucket: Duration,
+    max_tokens: usize,
+}
+
+impl TokenBucket {
+    /// Construct a TokenBucket.
+    ///
+    /// * `max_tokens`: The maximum number of tokens that the bucket can hold.
+    /// * `tokens_per_fill`: The number of tokens added to the bucket during each fill interval.
+    /// * `fill_interval`: The interval at which tokens are added to the bucket; during each fill
+    ///   interval, `tokens_per_fill` tokens are added.
+    pub fn new(max_tokens: u32, tokens_per_fill: u32, fill_interval: Duration) -> TokenBucket {
+        let time_per_bucket = max_tokens * fill_interval / tokens_per_fill;
+        let now = Instant::now();
+        Self {
+            time: AtomicInstant::new(now.checked_sub(2 * time_per_bucket).unwrap_or(now)),
+            time_per_token: fill_interval / tokens_per_fill,
+            max_tokens: max_tokens as usize,
+            time_per_bucket,
+        }
+    }
+
+    /// Try to consume a number of tokens, returning `true` on success and `false`
+    /// if the bucket does not (yet) hold enough tokens.
+    ///
+    /// * `tokens`: The number of tokens to consume.
+    #[allow(dead_code)]
+    pub fn consume(&self, tokens: u32) -> bool {
+        let req_fill_period = self.time_per_token * tokens;
+        let now = Instant::now();
+        let min_time = now.checked_sub(self.time_per_bucket).unwrap_or(now);
+
+        let mut old_time = self.time.load(Ordering::Relaxed);
+
+        loop {
+            let mut new_time = old_time;
+            if min_time > new_time {
+                new_time = min_time;
+            }
+            new_time += req_fill_period;
+            if new_time > now {
+                return false;
+            }
+
+            match self.time.compare_exchange_weak(old_time, new_time, Ordering::Relaxed, Ordering::Relaxed) {
+                Ok(_) => return true,
+                Err(x) => old_time = x,
+            }
+        }
+    }
+
+    /// Return the capacity of the bucket, in terms of tokens.
+    #[allow(dead_code)]
+    pub fn capacity(&self) -> usize {
+        self.max_tokens
+    }
+
+    /// Return the current bucket size, i.e. the number of tokens currently available.
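+    ///
+    /// A minimal usage sketch (timings chosen for illustration, mirroring the unit tests below):
+    ///
+    /// ```ignore
+    /// let tb = TokenBucket::new(5, 1, Duration::from_millis(10));
+    /// assert_eq!(tb.size(), 5); // a fresh bucket starts full
+    /// assert!(tb.consume(1));
+    /// assert_eq!(tb.size(), 4);
+    /// ```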
+    #[allow(dead_code)]
+    pub fn size(&self) -> usize {
+        let now = Instant::now();
+        let t = self.time.load(Ordering::Relaxed);
+        if now < t {
+            return 0;
+        }
+
+        let n = ((now - t).as_nanos() / self.time_per_token.as_nanos()) as usize;
+
+        if n > self.max_tokens {
+            self.max_tokens
+        } else {
+            n
+        }
+    }
+}
+
+impl Debug for TokenBucket {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("TokenBucket")
+            .field("time", &self.time.load(Ordering::Relaxed))
+            .field("time_per_token", &self.time_per_token)
+            .field("time_per_bucket", &self.time_per_bucket)
+            .field("max_tokens", &self.max_tokens)
+            .finish()
+    }
+}
+
+impl PartialEq for TokenBucket {
+    fn eq(&self, other: &Self) -> bool {
+        self.time_per_token == other.time_per_token && self.time_per_bucket == other.time_per_bucket
+    }
+}
+
+impl Clone for TokenBucket {
+    fn clone(&self) -> Self {
+        Self {
+            time: AtomicInstant::new(self.time.load(Ordering::Relaxed)),
+            time_per_token: self.time_per_token,
+            time_per_bucket: self.time_per_bucket,
+            max_tokens: self.max_tokens,
+        }
+    }
+}
+
+impl Eq for TokenBucket {}
+
+impl Hash for TokenBucket {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.time.load(Ordering::Relaxed).hash(state);
+        self.time_per_token.hash(state);
+        self.time_per_bucket.hash(state);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+    use std::{
+        sync::{Arc, Barrier},
+        thread::{self, sleep},
+    };
+
+    #[test]
+    fn token_bucket_burst() {
+        let tb = TokenBucket::new(5, 1, Duration::from_millis(10));
+
+        assert_eq!(tb.capacity(), 5);
+        assert_eq!(tb.size(), 5);
+
+        // consume the burst
+
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(!tb.consume(1));
+
+        assert_eq!(tb.capacity(), 5);
+        assert_eq!(tb.size(), 0);
+
+        // wait for the full refill
+
+        sleep(Duration::from_millis(50));
+
+        assert_eq!(tb.capacity(), 5);
+        assert_eq!(tb.size(), 5);
+
+        // consume the burst
+
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(!tb.consume(1));
+    }
+
+    #[test]
+    fn token_bucket_basic() {
+        let tb = TokenBucket::new(5, 1, Duration::from_millis(10));
+
+        // consume the burst
+
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(tb.consume(1));
+        assert!(!tb.consume(1));
+
+        // wait for one fill interval (one token)
+
+        sleep(Duration::from_millis(10));
+
+        // consume a token and then bucket is empty
+
+        assert!(tb.consume(1));
+
+        sleep(Duration::from_millis(10));
+
+        // consume a token
+
+        assert!(tb.consume(1));
+
+        sleep(Duration::from_millis(10));
+
+        assert!(tb.consume(1));
+
+        sleep(Duration::from_millis(10));
+    }
+
+    #[test]
+    fn token_bucket_multi_thread() {
+        eprintln!("starting multithreaded test...");
+        sleep(Duration::from_secs(5));
+
+        let n = 10;
+        let mut handles = Vec::with_capacity(n);
+        let barrier = Arc::new(Barrier::new(n));
+        let tb = Arc::new(TokenBucket::new(50, 10, Duration::from_millis(10)));
+
+        for i in 0..n {
+            let tb = Arc::clone(&tb);
+            let bar = Arc::clone(&barrier);
+            println!("running thread {i}...");
+            handles.push(thread::spawn(move || {
+                // consume all the tokens first...
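+                // (draining concurrently from many threads also exercises the
+                // compare_exchange_weak retry loop in consume())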
+ + while tb.consume(1) {} + + bar.wait(); + + sleep(Duration::from_millis(51)); + + assert!(tb.consume(1)); + assert!(tb.consume(1)); + assert!(tb.consume(1)); + assert!(tb.consume(1)); + assert!(tb.consume(1)); + })); + } + + eprintln!("waiting handles..."); + for handle in handles { + handle.join().unwrap(); + } + } +} diff --git a/orion-lib/src/listeners/synthetic_http_response.rs b/orion-lib/src/listeners/synthetic_http_response.rs new file mode 100644 index 00000000..4d7307c5 --- /dev/null +++ b/orion-lib/src/listeners/synthetic_http_response.rs @@ -0,0 +1,474 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use crate::HttpBody; +use bytes::Bytes; +use http::uri::{InvalidUri, InvalidUriParts}; +use http::{HeaderValue, Version as HttpVersion}; +use http::{Response, StatusCode}; +use http_body_util::Full; + +#[derive(Debug, thiserror::Error)] +pub enum InvalidSyntheticResponse { + #[error(transparent)] + InvalidHttpResponse(#[from] http::Error), + #[error(transparent)] + InvalidHeaderValue(#[from] http::header::InvalidHeaderValue), + #[error(transparent)] + InvalidUri(#[from] InvalidUri), + #[error(transparent)] + InvalidUriParts(#[from] InvalidUriParts), +} + +#[derive(Clone, Debug)] +pub struct SyntheticHttpResponse { + http_status: StatusCode, + body: Bytes, + close_connection: bool, +} + +// === impl SyntheticHttpResponse === + +impl SyntheticHttpResponse { + pub fn internal_error() -> Self { + Self { http_status: StatusCode::INTERNAL_SERVER_ERROR, body: Bytes::default(), close_connection: true } + } + + pub fn bad_gateway() -> Self { + Self { http_status: StatusCode::BAD_GATEWAY, body: Bytes::default(), close_connection: true } + } + + pub fn forbidden(msg: &str) -> Self { + Self { + http_status: StatusCode::FORBIDDEN, + body: Bytes::copy_from_slice(msg.as_bytes()), + //should this close actually? 
the connection seems to stay open since it's only triggered for a single http + close_connection: true, + } + } + + #[allow(dead_code)] + pub fn unavailable() -> Self { + Self { http_status: StatusCode::SERVICE_UNAVAILABLE, body: Bytes::default(), close_connection: true } + } + + pub fn gateway_timeout() -> Self { + Self { http_status: StatusCode::GATEWAY_TIMEOUT, body: Bytes::default(), close_connection: true } + } + + pub fn not_found() -> Self { + Self { http_status: StatusCode::NOT_FOUND, body: Bytes::default(), close_connection: false } + } + + #[allow(dead_code)] + pub fn custom_error(http_status: StatusCode) -> Self { + Self { http_status, body: Bytes::default(), close_connection: false } + } + + // #[inline] + // fn header_error_message(&self) -> Option { + // match self.header_error_message { + // Some(Cow::Borrowed(msg)) => Some(HeaderValue::from_static(msg)), + // Some(Cow::Owned(ref msg)) => { + // Some(HeaderValue::from_str(msg).unwrap_or_else(|_| HeaderValue::from_static("unexpected error"))) + // }, + // None => None, + // } + // } + + #[inline] + pub fn into_response(self, version: http::Version) -> Response { + let mut rsp = Response::new(Full::from(self.body).into()); + *rsp.status_mut() = self.http_status; + *rsp.version_mut() = version; + if self.close_connection && (version == HttpVersion::HTTP_10 || version == HttpVersion::HTTP_11) { + // Notify the (proxy or non-proxy) client that the connection will be closed. + rsp.headers_mut().insert(http::header::CONNECTION, HeaderValue::from_static("close")); + } + rsp + } +} + +#[cfg(test)] +mod tests { + // use http::{uri::Scheme, Uri}; + // use orion_configuration::config::network_filters::http_connection_manager::route::RedirectResponseCode; + + // use super::*; + + // #[test] + // fn test_basic_redirect() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:81", + // ))), + // strip_query: false, + // scheme_rewrite_specifier: None, + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.redirected.com:81/foo/bar?baz")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_redirect_strip_query() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:81", + // ))), + // strip_query: true, + // scheme_rewrite_specifier: None, + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.redirected.com:81/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_http2_redirect_1() -> Result<(), InvalidSyntheticResponse> { + // let uri = 
"http://www.test.com:80/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::HostRedirect(Authority::from_static("www.redirected.com"))), + // strip_query: true, + // scheme_rewrite_specifier: Some(Scheme::HTTPS), + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("https://www.redirected.com/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_https_redirect_2() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com:80/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::HostRedirect(Authority::from_static("www.redirected.com"))), + // strip_query: true, + // scheme_rewrite_specifier: Some(Scheme::HTTPS), + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("https://www.redirected.com/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_https_redirect_3() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:8080", + // ))), + // strip_query: true, + // scheme_rewrite_specifier: Some(Scheme::HTTPS), + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("https://www.redirected.com:8080/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_scheme_redirect_1() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:80", + // ))), + // strip_query: true, + // scheme_rewrite_specifier: Some(Scheme::HTTPS), + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("https://www.redirected.com:80/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_scheme_redirect_2() -> Result<(), InvalidSyntheticResponse> { + // let uri = "https://www.test.com:443/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // 
authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com", + // ))), + // strip_query: true, + // scheme_rewrite_specifier: Some(Scheme::HTTP), + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.redirected.com/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_scheme_redirect_3() -> Result<(), InvalidSyntheticResponse> { + // let uri = "https://www.test.com:443/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: None, + // strip_query: true, + // scheme_rewrite_specifier: Some(Scheme::HTTP), + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.test.com/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_scheme_redirect_4() -> Result<(), InvalidSyntheticResponse> { + // let uri = "https://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:443", + // ))), + // strip_query: true, + // scheme_rewrite_specifier: Some(Scheme::HTTP), + // path_rewrite_specifier: None, + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.redirected.com:443/foo/bar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_path_rewrite_1() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:80", + // ))), + // strip_query: true, + // scheme_rewrite_specifier: None, + // path_rewrite_specifier: Some(PathRewriteSpecifier::Path(PathAndQuery::from_str("/hello/world")?)), + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.redirected.com:80/hello/world")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_path_rewrite_2() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:80", + // ))), + // strip_query: false, + // scheme_rewrite_specifier: None, + // 
path_rewrite_specifier: Some(PathRewriteSpecifier::Path(PathAndQuery::from_str("/hello/world")?)), + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.redirected.com:80/hello/world?baz")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn test_path_rewrite_3() -> Result<(), InvalidSyntheticResponse> { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:80", + // ))), + // strip_query: false, + // scheme_rewrite_specifier: None, + // path_rewrite_specifier: Some(PathRewriteSpecifier::Path(PathAndQuery::from_str("/hello/world?foobar")?)), + // }; + + // let res = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // Some(uri), + // None, + // )?; + + // let expected = &HeaderValue::from_str("http://www.redirected.com:80/hello/world?foobar")?; + + // assert_eq!(res.headers().get(http::header::LOCATION), Some(expected)); + + // Ok(()) + // } + + // #[test] + // fn redirect_unexpected_status_code() { + // let uri = "http://www.test.com/foo/bar?baz".parse::().unwrap(); + + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:81", + // ))), + // strip_query: true, + // scheme_rewrite_specifier: None, + // path_rewrite_specifier: None, + // }; + + // let result = + // SyntheticHttpResponse::redirect(StatusCode::OK, ra).into_response(http::Version::HTTP_11, Some(uri), None); + + // assert!(matches!(result, Err(InvalidSyntheticResponse::RedirectUnexpectedStatusCode))); + // } + + // #[test] + // fn redirect_missing_uri() { + // let ra = RedirectAction { + // response_code: RedirectResponseCode::TemporaryRedirect, + // authority_redirect: Some(AuthorityRedirect::AuthorityRedirect(Authority::from_static( + // "www.redirected.com:81", + // ))), + // strip_query: false, + // scheme_rewrite_specifier: None, + // path_rewrite_specifier: None, + // }; + + // let result = SyntheticHttpResponse::redirect(StatusCode::TEMPORARY_REDIRECT, ra).into_response( + // http::Version::HTTP_11, + // None, + // None, + // ); + + // assert!(matches!(result, Err(InvalidSyntheticResponse::RedirectMissingUri))); + // } +} diff --git a/orion-lib/src/listeners/tcp_proxy.rs b/orion-lib/src/listeners/tcp_proxy.rs new file mode 100644 index 00000000..27d81c37 --- /dev/null +++ b/orion-lib/src/listeners/tcp_proxy.rs @@ -0,0 +1,77 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use crate::{clusters::clusters_manager, AsyncStream, Result};
+use compact_str::CompactString;
+use orion_configuration::config::{
+    cluster::ClusterSpecifier as ClusterSpecifierConfig, network_filters::tcp_proxy::TcpProxy as TcpProxyConfig,
+};
+use std::fmt;
+use tracing::debug;
+
+#[derive(Debug, Clone)]
+pub struct TcpProxy {
+    pub listener_name: CompactString,
+    cluster: ClusterSpecifierConfig,
+}
+
+#[derive(Debug, Clone)]
+pub struct TcpProxyBuilder {
+    listener_name: Option<CompactString>,
+    tcp_proxy_config: TcpProxyConfig,
+}
+
+impl From<TcpProxyConfig> for TcpProxyBuilder {
+    fn from(tcp_proxy_config: TcpProxyConfig) -> Self {
+        Self { tcp_proxy_config, listener_name: None }
+    }
+}
+
+impl TcpProxyBuilder {
+    pub fn with_listener_name(self, name: CompactString) -> Self {
+        TcpProxyBuilder { listener_name: Some(name), ..self }
+    }
+    pub fn build(self) -> Result<TcpProxy> {
+        let listener_name = self.listener_name.ok_or("listener name is not set")?;
+        let TcpProxyConfig { cluster_specifier } = self.tcp_proxy_config;
+        Ok(TcpProxy { listener_name, cluster: cluster_specifier })
+    }
+}
+
+impl fmt::Display for TcpProxy {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("TcpProxy").field("name", &self.listener_name).finish()
+    }
+}
+
+impl TcpProxy {
+    pub async fn serve_connection(&self, mut stream: AsyncStream) -> Result<()> {
+        let cluster_selector = &self.cluster;
+        let maybe_channel = clusters_manager::get_tcp_connection(cluster_selector);
+        if let Ok(channel) = maybe_channel {
+            let mut channel = channel.await?;
+            let res = tokio::io::copy_bidirectional(&mut stream, &mut channel).await;
+            debug!("TCP Connection closed {res:?}");
+            Ok(())
+        } else {
+            Err("No upstream cluster available for this TCP connection".into())
+        }
+    }
+}
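+// A minimal usage sketch (illustrative, not part of the original change): assuming a
+// `TcpProxyConfig` deserialized elsewhere and an accepted `AsyncStream`, the builder
+// is expected to be driven like this:
+//
+//     let proxy = TcpProxyBuilder::from(tcp_proxy_config)
+//         .with_listener_name(CompactString::from("listener_0"))
+//         .build()?;
+//     proxy.serve_connection(stream).await?;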
diff --git a/orion-lib/src/observability/mod.rs b/orion-lib/src/observability/mod.rs
new file mode 100644
index 00000000..c936694c
--- /dev/null
+++ b/orion-lib/src/observability/mod.rs
@@ -0,0 +1,102 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub mod opentelemetry;
+use std::sync::atomic::{AtomicI64, AtomicU64, Ordering};
+
+static STATS: StatisticsHolder = StatisticsHolder::new();
+
+// A ZST that exposes access to the static StatisticsHolder.
+// It makes the exposed API a bit more rust-y and lets us swap out or extend the implementation
+// more easily later on, e.g. when we add a struct that requires obtaining a lock, or when we want
+// to hand out handles that only update the atomics on drop to reduce the amount of inter-core syncs.
+pub struct Statistics;
+impl Statistics {
+    pub fn listener() -> &'static ListenerStatistics {
+        &STATS.listener
+    }
+
+    pub fn http() -> &'static HttpStatistics {
+        &STATS.http
+    }
+}
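+// Usage sketch (illustrative only): call sites are expected to bump counters through the ZST,
+// e.g. from a listener's accept loop:
+//
+//     Statistics::listener()._add_connection();
+//     // ... serve the connection ...
+//     Statistics::listener().remove_connection();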
+
+pub struct StatisticsHolder {
+    listener: ListenerStatistics,
+    http: HttpStatistics,
+}
+
+impl StatisticsHolder {
+    const fn new() -> Self {
+        Self {
+            listener: ListenerStatistics::new(),
+            http: HttpStatistics::new(),
+        }
+    }
+}
+
+#[derive(Default, Debug)]
+pub struct ListenerStatistics {
+    total_connections: AtomicU64,
+    active_connections: AtomicI64,
+}
+
+impl ListenerStatistics {
+    const fn new() -> Self {
+        Self {
+            total_connections: AtomicU64::new(0),
+            active_connections: AtomicI64::new(0),
+        }
+    }
+    pub fn _add_connection(&self) {
+        self.total_connections.fetch_add(1, Ordering::Relaxed);
+        self.active_connections.fetch_add(1, Ordering::Relaxed);
+    }
+    pub fn remove_connection(&self) {
+        self.active_connections.fetch_sub(1, Ordering::Relaxed);
+    }
+
+    pub fn total_connections(&self) -> u64 {
+        self.total_connections.load(Ordering::Relaxed)
+    }
+
+    pub fn active_connections(&self) -> i64 {
+        self.active_connections.load(Ordering::Relaxed)
+    }
+}
+pub struct HttpStatistics {
+    gateway_errors: AtomicU64,
+}
+
+impl HttpStatistics {
+    const fn new() -> Self {
+        Self {
+            gateway_errors: AtomicU64::new(0),
+        }
+    }
+
+    pub fn _increment_error(&self) {
+        self.gateway_errors.fetch_add(1, Ordering::Relaxed);
+    }
+
+    pub fn gateway_errors(&self) -> u64 {
+        self.gateway_errors.load(Ordering::Relaxed)
+    }
+}
diff --git a/orion-lib/src/observability/opentelemetry.rs b/orion-lib/src/observability/opentelemetry.rs
new file mode 100644
index 00000000..431b0766
--- /dev/null
+++ b/orion-lib/src/observability/opentelemetry.rs
@@ -0,0 +1,60 @@
+#![allow(dead_code)]
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub mod metric_exporters;
+
+use std::time::Duration;
+
+use metric_exporters::*;
+use opentelemetry::{global, KeyValue};
+
+use crate::Result;
+
+pub fn init_metrics_exporter(config: Config) -> Result<()> {
+    use Config::*;
+    match config {
+        StdOut => stdout::init(),
+        Prometheus { socket_address } => prometheus::init(socket_address),
+        OtelPusher {
+            endpoint,
+            push_interval_seconds,
+        } => otel::init(endpoint, Duration::from_secs_f32(push_interval_seconds)),
+    }
+}
+
+const ENVOY_LISTENER_NAME_KEY: &str = "envoy-listener-name";
+
+//todo: this binds us to the opentelemetry crate, which is alpha/beta and not great quality.
+// we probably want to replace it with a looser wrapper.
+// but the question is whether tagging is even something that is supported or wanted by HQ.
+// If not, atomic counters work; if so, a lockfree hashmap paired with an opentelemetry observer would.
+
+pub fn add_listener_connection(listener_name: String) {
+    let meter = global::meter("listeners");
+    let counter = meter.u64_counter("connections").init();
+    counter.add(1, &[KeyValue::new(ENVOY_LISTENER_NAME_KEY, listener_name)]);
+}
+
+pub fn add_proxy_error(listener_name: String) {
+    let meter = global::meter("http");
+    let counter = meter.u64_counter("proxy-errors").init();
+    counter.add(1, &[KeyValue::new(ENVOY_LISTENER_NAME_KEY, listener_name)]);
+}
diff --git a/orion-lib/src/observability/opentelemetry/metric_exporters.rs b/orion-lib/src/observability/opentelemetry/metric_exporters.rs
new file mode 100644
index 00000000..d99ab98c
--- /dev/null
+++ b/orion-lib/src/observability/opentelemetry/metric_exporters.rs
@@ -0,0 +1,101 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::net::SocketAddr;
+
+use opentelemetry::{global, metrics::MeterProvider as _};
+use opentelemetry_sdk::metrics::MeterProvider;
+use serde::Deserialize;
+
+use crate::{observability::Statistics, Result};
+
+#[derive(Deserialize, Debug)]
+#[serde(tag = "type")]
+pub enum Config {
+    StdOut,
+    Prometheus {
+        socket_address: SocketAddr,
+    },
+    OtelPusher {
+        endpoint: String,
+        #[serde(default = "default_push_interval_seconds")]
+        push_interval_seconds: f32,
+    }, //endpoint takes a String, but should it be a Uri?
+}
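+// Illustrative config snippets for this internally-tagged enum (assuming a YAML source;
+// the `type` tag comes from the `#[serde(tag = "type")]` attribute above, and the
+// address/endpoint values are made up):
+//
+//     type: Prometheus
+//     socket_address: "127.0.0.1:9090"
+//
+//     type: OtelPusher
+//     endpoint: "http://collector:4317"
+//     push_interval_seconds: 2.5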
+
+//need to use a function for this because of https://github.com/serde-rs/serde/issues/368
+// grumble grumble
+fn default_push_interval_seconds() -> f32 {
+    5.0
+}
+
+pub fn register_observable_metrics(provider: &MeterProvider) -> Result<()> {
+    let meter = provider.meter("AtomicObservables");
+    meter
+        .u64_observable_counter("non_2xx")
+        .with_description(
+            "total number of replies with a status code other than 2xx sent by this proxy",
+        )
+        .with_callback(|i| i.observe(Statistics::http().gateway_errors(), &[]))
+        .try_init()?;
+    meter
+        .u64_observable_counter("n_connections")
+        .with_description("total number of connections seen by this proxy")
+        .with_callback(|i| i.observe(Statistics::listener().total_connections(), &[]))
+        .try_init()?;
+    meter
+        .i64_observable_gauge("active_connections")
+        .with_description("current number of inbound connections")
+        .with_callback(|i| i.observe(Statistics::listener().active_connections(), &[]))
+        .try_init()?;
+    Ok(())
+}
+
+pub fn init_and_set_global_meter_provider(meter_provider: MeterProvider) -> Result<()> {
+    crate::observability::opentelemetry::register_observable_metrics(&meter_provider)?;
+    global::set_meter_provider(meter_provider);
+    Ok(())
+}
+
+pub mod otel;
+
+#[cfg(not(feature = "metrics-stdout"))]
+pub mod stdout {
+    use crate::Result;
+    pub fn init() -> Result<()> {
+        Err("Tried to use stdout logger for metrics, but this version is not compiled with --features metrics-stdout".into())
+    }
+}
+
+#[cfg(feature = "metrics-stdout")]
+pub mod stdout;
+#[cfg(not(feature = "metrics-prometheus"))]
+pub mod prometheus {
+    use std::net::SocketAddr;
+
+    use crate::Result;
+
+    pub fn init(addr: SocketAddr) -> Result<()> {
+        Err(format!("Tried to use prometheus logger for metrics on {addr:?}, but this version is not compiled with --features metrics-prometheus").into())
+    }
+}
+
+#[cfg(feature = "metrics-prometheus")]
+pub mod prometheus;
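+// Note (illustrative): the fallback modules above let the crate build without the optional
+// exporters; a real exporter is selected at compile time via cargo features, e.g.
+//
+//     cargo build --features metrics-prometheus
+//     cargo build --features metrics-stdout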
diff --git a/orion-lib/src/observability/opentelemetry/metric_exporters/otel.rs b/orion-lib/src/observability/opentelemetry/metric_exporters/otel.rs
new file mode 100644
index 00000000..39bd0e35
--- /dev/null
+++ b/orion-lib/src/observability/opentelemetry/metric_exporters/otel.rs
@@ -0,0 +1,53 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::time::Duration;
+
+use opentelemetry_otlp::WithExportConfig;
+use opentelemetry_sdk::{
+    metrics::{MeterProvider, PeriodicReader},
+    runtime,
+};
+use tracing::info;
+
+use crate::Result;
+
+pub fn init(endpoint: String, push_interval: Duration) -> Result<()> {
+    info!(
+        "pushing opentelemetry metrics to {endpoint} every {:.4} seconds",
+        push_interval.as_secs_f32()
+    );
+    let export_config = opentelemetry_otlp::ExportConfig {
+        endpoint,
+        ..opentelemetry_otlp::ExportConfig::default()
+    };
+    let exporter = opentelemetry_otlp::new_exporter()
+        .tonic()
+        .with_export_config(export_config)
+        .build_metrics_exporter(
+            Box::new(opentelemetry_sdk::metrics::reader::DefaultAggregationSelector::new()),
+            Box::new(opentelemetry_sdk::metrics::reader::DefaultTemporalitySelector::new()),
+        )?;
+    // use the configured interval rather than a hardcoded one, so the reader
+    // actually honours the `push_interval` advertised in the log line above
+    let reader = PeriodicReader::builder(exporter, runtime::Tokio)
+        .with_interval(push_interval)
+        .build();
+    let meter_provider = MeterProvider::builder().with_reader(reader).build();
+    super::init_and_set_global_meter_provider(meter_provider)
+}
diff --git a/orion-lib/src/observability/opentelemetry/metric_exporters/prometheus.rs b/orion-lib/src/observability/opentelemetry/metric_exporters/prometheus.rs
new file mode 100644
index 00000000..7b6287bf
--- /dev/null
+++ b/orion-lib/src/observability/opentelemetry/metric_exporters/prometheus.rs
@@ -0,0 +1,73 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::net::SocketAddr;
+
+use axum::{extract::State, response::IntoResponse, routing::get, Router};
+use opentelemetry_sdk::metrics::MeterProvider;
+use prometheus::{Encoder, Registry, TextEncoder};
+use tracing::{trace, warn};
+
+use crate::Result;
+
+pub fn init(addr: SocketAddr) -> Result<()> {
+    let registry = prometheus::Registry::new();
+    let reader = opentelemetry_prometheus::exporter()
+        .with_registry(registry.clone())
+        .build()?;
+    let meter_provider = MeterProvider::builder().with_reader(reader).build();
+    super::init_and_set_global_meter_provider(meter_provider)?;
+    let fut = start_prometheus_server(addr, registry)?;
+    tokio::spawn(async move {
+        if let Err(e) = fut.await {
+            tracing::error!("Prometheus dashboard failed with error: {e}");
+        }
+    });
+    Ok(())
+}
+
+fn start_prometheus_server(
+    addr: SocketAddr,
+    registry: Registry,
+) -> Result<impl std::future::Future<Output = Result<()>> + Send> {
+    async fn get_handler(State(registry): State<Registry>) -> axum::response::Response {
+        trace!("prometheus got pinged");
+        let encoder = TextEncoder::new();
+        let metric_families = registry.gather();
+        let mut result = Vec::new();
+        if let Err(e) = encoder.encode(&metric_families, &mut result) {
+            warn!("failed to encode metrics for prometheus because of error: \"{e}\"");
+            (
+                axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+                format!("{e}"),
+            )
+                .into_response()
+        } else {
+            result.into_response()
+        }
+    }
+    let app = Router::new().route("/metrics", get(get_handler).with_state(registry));
+    Ok(async move {
+        axum::Server::bind(&addr)
+            .serve(app.into_make_service())
+            .await?;
+        Ok(())
+    })
+}
diff --git a/orion-lib/src/observability/opentelemetry/metric_exporters/stdout.rs b/orion-lib/src/observability/opentelemetry/metric_exporters/stdout.rs
new file mode 100644
index 00000000..e51ca3cb
--- /dev/null
+++ b/orion-lib/src/observability/opentelemetry/metric_exporters/stdout.rs
@@ -0,0 +1,33 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +// + +use opentelemetry_sdk::{ + metrics::{MeterProvider, PeriodicReader}, + runtime, +}; + +use crate::Result; + +pub fn init() -> Result<()> { + let exporter = opentelemetry_stdout::MetricsExporterBuilder::default().build(); + let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); + let meter_provider = MeterProvider::builder().with_reader(reader).build(); + super::init_and_set_global_meter_provider(meter_provider) +} diff --git a/orion-lib/src/observability/prometheus_server.rs b/orion-lib/src/observability/prometheus_server.rs new file mode 100644 index 00000000..9929d676 --- /dev/null +++ b/orion-lib/src/observability/prometheus_server.rs @@ -0,0 +1,137 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use std::{ + net::{TcpListener, ToSocketAddrs}, + result::Result as StdResult, + sync::Arc, +}; + +use axum::{ + extract::State, http::header::HeaderValue, response::IntoResponse, routing::get, Router, +}; +use http::header::CONTENT_TYPE; +use prometheus_client::{ + collector::Collector, + encoding::{text::encode, DescriptorEncoder, EncodeMetric}, + metrics::{counter::ConstCounter, gauge::ConstGauge}, + registry::Registry, +}; +use tower_http::set_header::SetResponseHeaderLayer; +use tracing::{trace, warn}; + +use super::*; +use crate::Result; + +//We create an extra struct here because the prometheus_client api wants a boxed up dyn trait +// object later on. +// +// Not a huge fan of the prometheus_client api, given how simple the file-format used is +// we might be able to roll our own as well and reduce the boilerplate +#[derive(Debug)] +struct ListenerCollector; +impl Collector for ListenerCollector { + fn encode(&self, mut encoder: DescriptorEncoder) -> StdResult<(), std::fmt::Error> { + let active = Statistics::listener() + .active_connections + .load(Ordering::Relaxed); + let total = Statistics::listener() + .total_connections + .load(Ordering::Relaxed); + let counter = ConstCounter::new(total); + counter.encode(encoder.encode_descriptor( + "connections", + "all connections", + None, + counter.metric_type(), + )?)?; + let gauge = ConstGauge::new(active); + gauge.encode(encoder.encode_descriptor( + "active_connections", + "active connections", + None, + gauge.metric_type(), + )?) + } +} + +#[derive(Debug)] +struct HttpCollector; +impl Collector for HttpCollector { + fn encode(&self, mut encoder: DescriptorEncoder) -> StdResult<(), std::fmt::Error> { + let gateway_errors = Statistics::http().gateway_errors.load(Ordering::Relaxed); + let counter = ConstCounter::new(gateway_errors); + counter.encode(encoder.encode_descriptor( + "gateway_errors", + "number of gateway error responses generated by this proxy", + None, + counter.metric_type(), + )?) 
+    }
+}
+
+impl Statistics {
+    pub fn register_with_prometheus(registry: &mut Registry) {
+        registry
+            .sub_registry_with_prefix("listener")
+            .register_collector(Box::new(ListenerCollector));
+        registry
+            .sub_registry_with_prefix("http")
+            .register_collector(Box::new(HttpCollector));
+    }
+
+    pub fn create_prometheus_register() -> Registry {
+        let mut registry = Registry::default();
+        Statistics::register_with_prometheus(&mut registry);
+        registry
+    }
+}
+
+pub async fn serve_on(addr: impl ToSocketAddrs) -> Result<()> {
+    async fn get_handler(State(registry): State<Arc<Registry>>) -> axum::response::Response {
+        trace!("prometheus got pinged");
+        let mut buffer = String::new();
+        if let Err(e) = encode(&mut buffer, &registry) {
+            warn!("failed to encode metrics for prometheus because of error: \"{e}\"");
+            (
+                axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+                format!("{e}"),
+            )
+                .into_response()
+        } else {
+            buffer.into_response()
+        }
+    }
+    let registry = Statistics::create_prometheus_register();
+    let app = Router::new()
+        .route(
+            "/metrics",
+            get(get_handler).with_state(std::sync::Arc::new(registry)),
+        )
+        .layer(SetResponseHeaderLayer::overriding(
+            CONTENT_TYPE,
+            HeaderValue::from_static("text/plain"),
+        ));
+    let tcp_listener = TcpListener::bind(addr)?;
+    axum::Server::from_tcp(tcp_listener)?
+        .serve(app.into_make_service())
+        .await?;
+    Ok(())
+}
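+// Illustrative: with the server running, the exposition endpoint can be scraped manually,
+// e.g. (127.0.0.1:9090 stands in for whatever `addr` was bound to):
+//
+//     curl http://127.0.0.1:9090/metrics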
diff --git a/orion-lib/src/secrets/mod.rs b/orion-lib/src/secrets/mod.rs
new file mode 100644
index 00000000..4d04a06b
--- /dev/null
+++ b/orion-lib/src/secrets/mod.rs
@@ -0,0 +1,24 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+mod secrets_manager;
+mod tls_configurator;
+pub use secrets_manager::{CertificateSecret, SecretManager, TransportSecret};
+pub use tls_configurator::{TlsConfigurator, WantsToBuildClient, WantsToBuildServer};
diff --git a/orion-lib/src/secrets/secrets_manager.rs b/orion-lib/src/secrets/secrets_manager.rs
new file mode 100644
index 00000000..fcca6a71
--- /dev/null
+++ b/orion-lib/src/secrets/secrets_manager.rs
@@ -0,0 +1,180 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use crate::Result;
+use compact_str::{CompactString, ToCompactString};
+use orion_configuration::{
+    config::secret::{Secret, TlsCertificate, Type, ValidationContext},
+    VerifySingleIter,
+};
+use rustc_hash::FxHashMap as HashMap;
+use rustls::{
+    pki_types::{CertificateDer, PrivateKeyDer},
+    RootCertStore,
+};
+use rustls_pemfile::{certs, pkcs8_private_keys};
+use std::sync::Arc;
+use tracing::{debug, warn};
+use webpki::types::ServerName;
+use x509_parser::{self, extensions::GeneralName};
+
+#[derive(Clone, Debug)]
+pub struct CertStore(Arc<RootCertStore>);
+
+impl From<CertStore> for Arc<RootCertStore> {
+    fn from(value: CertStore) -> Self {
+        value.0
+    }
+}
+
+impl TryFrom<&ValidationContext> for CertStore {
+    type Error = crate::Error;
+
+    fn try_from(validation_context: &ValidationContext) -> Result<Self> {
+        let mut ca_reader = validation_context.trusted_ca().into_buf_read()?;
+        let mut root_store = rustls::RootCertStore::empty();
+        let ca_certs = certs(&mut ca_reader)
+            .map(|f| f.map_err(|e| format!("Can't parse certificate {e:?}").into()))
+            .collect::<Result<Vec<_>>>()?;
+
+        if ca_certs.is_empty() {
+            return Err("No certificates have been configured".into());
+        }
+
+        let (good, bad) = root_store.add_parsable_certificates(ca_certs);
+        debug!("Added {good} certs, rejected {bad} certs");
+        if bad > 0 {
+            Err("Some certs in the trust store were invalid".into())
+        } else {
+            Ok(CertStore(Arc::new(root_store)))
+        }
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct SecretManager {
+    certificate_secrets: HashMap<String, Arc<CertificateSecret>>,
+    validation_contexts: HashMap<String, Arc<CertStore>>,
+}
+
+#[derive(Debug, Clone)]
+pub struct CertificateSecret {
+    pub name: Option<CompactString>,
+    pub key: Arc<PrivateKeyDer<'static>>,
+    pub certs: Arc<Vec<CertificateDer<'static>>>,
+}
+
+#[derive(Debug, Clone)]
+pub enum TransportSecret {
+    Certificate(Arc<CertificateSecret>),
+    ValidationContext(Arc<CertStore>),
+}
+
+impl TryFrom<&TlsCertificate> for CertificateSecret {
+    type Error = crate::Error;
+
+    fn try_from(certificate: &TlsCertificate) -> Result<Self> {
+        let mut cert_reader = certificate.certificate_chain().into_buf_read()?;
+        let mut key_reader = certificate.private_key().into_buf_read()?;
+        let key = pkcs8_private_keys(&mut key_reader)
+            .map(|f| f.map_err(|e| format!("Can't parse private key: {e}")))
+            .verify_single()??;
+
+        let certificates = certs(&mut cert_reader)
+            .map(|f| f.map_err(|e| format!("Can't parse certificate {e:?}").into()))
+            .collect::<Result<Vec<_>>>()?;
+
+        let certificates: Vec<_> = certificates.into_iter().map(CertificateDer::from).collect();
+        let Some(cert) = certificates.first() else {
+            return Err("No certificates have been configured".into());
+        };
+
+        let mut server_name = None;
+        let cloned_cert = cert.clone();
+        let (_, x509_cert) = x509_parser::parse_x509_certificate(&cloned_cert)?;
+        let subject = x509_cert.subject();
+        if let Ok(Some(san)) = x509_cert.subject_alternative_name() {
+            for san_name in &san.value.general_names {
+                let name = match *san_name {
+                    GeneralName::DNSName(name) => name.to_owned(),
+                    _ => continue,
+                };
+
+                let is_server_name = ServerName::try_from(name.clone()).is_ok();
+                debug!("Certificate SAN {san_name}: {name} is server name: {is_server_name}");
+                if is_server_name {
+                    server_name = Some(name.to_compact_string());
+                }
+            }
+        }
+        let common_name = subject.iter_common_name().next().and_then(|cn| cn.as_str().ok());
+
+        debug!("Certificate Subject's common name {common_name:?}");
+
+        let key = Arc::new(PrivateKeyDer::Pkcs8(key));
+        Ok(CertificateSecret { name: server_name, key, certs: Arc::new(certificates) })
+    }
+}
+
+impl SecretManager {
+    pub fn new() -> Self {
+        Self { certificate_secrets: HashMap::default(), validation_contexts: HashMap::default() }
+    }
+
+    pub fn add(&mut self, secret: Secret) -> Result<TransportSecret> {
+        let secret_id = secret.name();
+        let secret = match secret.kind() {
+            Type::TlsCertificate(certificate) => {
+                let secret = Arc::new(CertificateSecret::try_from(certificate)?);
+                let _old_value = self.certificate_secrets.insert(secret_id.to_owned(), Arc::clone(&secret));
+                TransportSecret::Certificate(secret)
+            },
+            Type::ValidationContext(validation_context) => {
+                let store = Arc::new(CertStore::try_from(validation_context)?);
+                let _old_value = self.validation_contexts.insert(secret_id.to_owned(), Arc::clone(&store));
+                TransportSecret::ValidationContext(store)
+            },
+        };
+        Ok(secret)
+    }
+    pub fn remove(&mut self, secret_id: &str, secret_type: &Type) -> Result<()> {
+        match secret_type {
+            Type::TlsCertificate(_) => {
+                let _old_value = self.certificate_secrets.remove(secret_id);
+            },
+            Type::ValidationContext(_) => {
+                let _old_value = self.validation_contexts.remove(secret_id);
+            },
+        }
+        Ok(())
+    }
+
+    pub fn get_certificate(&self, secret_id: &str) -> Result<Option<TransportSecret>> {
+        let value = self.certificate_secrets.get(secret_id);
+        if value.is_none() {
+            warn!("SDS secret '{secret_id}' is missing");
+        }
+        Ok(value.map(|s| TransportSecret::Certificate(Arc::clone(s))))
+    }
+    pub fn get_validation_context(&self, secret_id: &str) -> Result<Option<TransportSecret>> {
+        let value = self.validation_contexts.get(secret_id);
+        Ok(value.map(|s| TransportSecret::ValidationContext(Arc::clone(s))))
+    }
+}
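+// Usage sketch (illustrative; the `Secret` value would come from an SDS update or from
+// static config parsing elsewhere, and "server-cert" is a made-up secret id):
+//
+//     let mut manager = SecretManager::new();
+//     let transport_secret = manager.add(secret)?;
+//     let cert = manager.get_certificate("server-cert")?;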
diff --git a/orion-lib/src/secrets/tls_configurator/configurator.rs b/orion-lib/src/secrets/tls_configurator/configurator.rs
new file mode 100644
index 00000000..611f423a
--- /dev/null
+++ b/orion-lib/src/secrets/tls_configurator/configurator.rs
@@ -0,0 +1,507 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use super::tls_configurator_builder::{WantsToBuildClient, WantsToBuildServer};
+use crate::{
+    secrets::{
+        secrets_manager::CertStore,
+        tls_configurator::tls_configurator_builder::{SecretHolder, TlsContextBuilder},
+        CertificateSecret, TransportSecret,
+    },
+    Result, SecretManager,
+};
+use compact_str::CompactString;
+use orion_configuration::config::{
+    cluster::{TlsConfig as TlsClientConfig, TlsSecret},
+    listener::TlsConfig as TlsServerConfig,
+    secret::TlsCertificate as TlsCertificateConfig,
+    transport::{CommonTlsValidationContext, Secrets, TlsVersion},
+};
+use rustls::{
+    client::danger::ServerCertVerifier,
+    crypto::KeyProvider,
+    pki_types::{CertificateDer, PrivateKeyDer},
+    version::{TLS12, TLS13},
+    ClientConfig, RootCertStore, ServerConfig,
+};
+use rustls_platform_verifier::Verifier;
+use std::{collections::HashMap, result::Result as StdResult, sync::Arc};
+use tracing::{debug, warn};
+use webpki::types::ServerName;
+
+pub fn get_crypto_key_provider() -> Result<&'static dyn KeyProvider> {
+    rustls::crypto::CryptoProvider::get_default()
+        .map(|p| p.key_provider)
+        .ok_or("Unable to get rustls crypto provider".into())
+}
+
+#[allow(dead_code)]
+#[derive(Debug)]
+struct IgnoreCertVerifier(Verifier);
+
+impl ServerCertVerifier for IgnoreCertVerifier {
+    fn verify_server_cert(
+        &self,
+        _: &rustls::pki_types::CertificateDer<'_>,
+        _: &[rustls::pki_types::CertificateDer<'_>],
+        _: &rustls::pki_types::ServerName<'_>,
+        _: &[u8],
+        _: rustls::pki_types::UnixTime,
+    ) -> StdResult<rustls::client::danger::ServerCertVerified, rustls::Error> {
+        Ok(rustls::client::danger::ServerCertVerified::assertion())
+    }
+
+    fn verify_tls12_signature(
+        &self,
+        message: &[u8],
+        cert: &rustls::pki_types::CertificateDer<'_>,
+        dss: &rustls::DigitallySignedStruct,
+    ) -> StdResult<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
+        self.0.verify_tls12_signature(message, cert, dss)
+    }
+
+    fn verify_tls13_signature(
+        &self,
+        message: &[u8],
+        cert: &rustls::pki_types::CertificateDer<'_>,
+        dss: &rustls::DigitallySignedStruct,
+    ) -> StdResult<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
+        self.0.verify_tls13_signature(message, cert, dss)
+    }
+
+    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
+        self.0.supported_verify_schemes()
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct ClientCert {
+    pub key: Arc<PrivateKeyDer<'static>>,
+    pub certs: Arc<Vec<CertificateDer<'static>>>,
+}
+
+impl From<CertificateSecret> for ClientCert {
+    fn from(secret: CertificateSecret) -> Self {
+        let CertificateSecret { name: _, key, certs } = secret;
+        ClientCert { key, certs }
+    }
+}
+
+impl TryFrom<TransportSecret> for Arc<RootCertStore> {
+    type Error = crate::Error;
+    fn try_from(value: TransportSecret) -> Result<Self> {
+        match value {
+            TransportSecret::ValidationContext(context) => {
+                let cert_store = context.as_ref().clone();
+                Ok(cert_store.into())
+            },
+            TransportSecret::Certificate(_) => {
+                Err("TransportSecret certificate is not supported for root cert store".into())
+            },
+        }
+    }
+}
+
+impl TryFrom<TransportSecret> for ServerCert {
+    type Error = crate::Error;
+    fn try_from(value: TransportSecret) -> Result<Self> {
+        match value {
+            TransportSecret::Certificate(certificate) => {
+                let certificate = certificate.as_ref().clone();
+                ServerCert::try_from(certificate)
+            },
+            TransportSecret::ValidationContext(_) => {
+                Err("TransportSecret ValidationContext is not supported for server certificate".into())
+            },
+        }
+    }
+}
+
+impl TryFrom<TransportSecret> for ClientCert {
+    type Error = crate::Error;
+    fn try_from(value: TransportSecret) -> Result<Self> {
+        match value {
+            TransportSecret::Certificate(certificate) => {
+                let certificate = certificate.as_ref().clone();
+                Ok(ClientCert::from(certificate))
+            },
+            TransportSecret::ValidationContext(_) => {
+                Err("TransportSecret ValidationContext is not supported for client certificate".into())
+            },
+        }
+    }
+}
+
+impl TryFrom<CertificateSecret> for ServerCert {
+    type Error = crate::Error;
+    fn try_from(secret: CertificateSecret) -> Result<Self> {
+        let CertificateSecret { name, key, certs } = secret;
+        if let Some(name) = name {
+            Ok(ServerCert { name, key: Arc::new(key.clone_key()), certs })
+        } else {
+            Err("secret doesn't contain server name".into())
+        }
+    }
+}
+
+impl TryFrom<Vec<TlsCertificateConfig>> for ClientCert {
+    type Error = crate::Error;
+
+    fn try_from(mut tls_certificates: Vec<TlsCertificateConfig>) -> Result<Self> {
+        // require exactly one entry; this also guards the `remove(0)` below
+        // against panicking on an empty vector
+        if tls_certificates.len() != 1 {
+            return Err("Exactly one client certificate should be configured".into());
+        }
+        let certificate = tls_certificates.remove(0);
+        ClientCert::try_from(certificate)
+    }
+}
+
+impl TryFrom<TlsCertificateConfig> for ClientCert {
+    type Error = crate::Error;
+
+    fn try_from(certificate: TlsCertificateConfig) -> Result<Self> {
+        let secret = CertificateSecret::try_from(&certificate)?;
+        Ok(ClientCert::from(secret))
+    }
+}
+
+#[derive(Clone)]
+pub struct ServerCert {
+    pub name: CompactString,
+    pub key: Arc<PrivateKeyDer<'static>>,
+    pub certs: Arc<Vec<CertificateDer<'static>>>,
+}
+
+impl std::fmt::Debug for ServerCert {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("ServerCert")
+            .field("key", &"Secret")
+            .field("certs", &self.certs)
+            .field("name", &self.name)
+            .finish()
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct TlsConfigurator<C, S> {
+    context_builder: TlsContextBuilder<S>,
+    config: Arc<C>,
+}
+
+impl TlsConfigurator<ClientConfig, WantsToBuildClient> {
+    pub fn update(self, secret_id: &str, secret: TransportSecret) -> Result<Self> {
+        let TlsContextBuilder { state } = self.context_builder;
+        let WantsToBuildClient {
+            supported_versions,
+            validation_context_secret_id,
+            certificate_store,
+            certificate_secret_id,
+            client_certificate,
+            sni,
+        } = state;
+        let new_builder = match secret {
+            TransportSecret::Certificate(certificate) => {
+                if Some(secret_id.to_owned()) == certificate_secret_id {
+                    let client_cert: ClientCert = (*certificate).clone().into();
+                    TlsContextBuilder::with_supported_versions(supported_versions)
+                        .with_client_certificate_store(validation_context_secret_id, certificate_store)
+                        .with_client_certificate(certificate_secret_id, Arc::new(client_cert))
+                        .with_sni(sni)
+                } else {
+                    let msg = format!("Secret name doesn't match {secret_id} {:?}", certificate_secret_id.as_deref());
+                    warn!("{msg}");
+                    return Err(msg.into());
+                }
+            },
+            TransportSecret::ValidationContext(cert_store) => {
+                if Some(secret_id.to_owned()) == validation_context_secret_id {
+                    let cert_store = (*cert_store).clone().into();
+                    let builder = TlsContextBuilder::with_supported_versions(supported_versions)
+                        .with_client_certificate_store(validation_context_secret_id, cert_store);
+                    if let Some(client_certificate) = client_certificate {
+                        builder.with_client_certificate(certificate_secret_id, client_certificate)
+                    } else {
+                        builder.with_no_client_auth()
+                    }
+                    .with_sni(sni)
+                } else {
+                    let msg = format!("Secret name doesn't match {secret_id} {validation_context_secret_id:?}",);
+                    warn!("{msg}");
+                    return Err(msg.into());
+                }
+            },
+        };
+
+        let new_config = new_builder.build()?;
+        Ok(TlsConfigurator { context_builder: new_builder, config: Arc::new(new_config) })
+    }
+}
+
+impl TlsConfigurator<ServerConfig, WantsToBuildServer> {
+    pub fn update(self, secret_id: &str, secret: TransportSecret) -> Result<Self> {
+        let TlsContextBuilder { state } = self.context_builder;
+        let WantsToBuildServer {
+            supported_versions,
+            validation_context_secret_id,
+            certificate_store,
+            mut server_ids_and_certificates,
+            require_client_cert,
+        } = state;
+        let new_builder = match secret {
+            TransportSecret::Certificate(certificate) => {
+                if let Some(secret) = server_ids_and_certificates.iter_mut().find(|s| s.name == secret_id) {
+                    let server_cert: ServerCert = (*certificate).clone().try_into()?;
+                    secret.server_cert = server_cert;
+
+                    let builder = TlsContextBuilder::with_supported_versions(supported_versions);
+
+                    if let Some(certificate_store) = certificate_store {
+                        builder.with_server_certificate_store(validation_context_secret_id, certificate_store)
+                    } else {
+                        builder.with_no_client_auth()
+                    }
+                    .with_certificates(server_ids_and_certificates)
+                    .with_client_authentication(require_client_cert)
+                } else {
+                    let msg = format!("Can't find secret {secret_id}");
+                    debug!("{msg}");
+                    return Err(msg.into());
+                }
+            },
+            TransportSecret::ValidationContext(cert_store) => {
+                if Some(secret_id.to_owned()) == validation_context_secret_id {
+                    let cert_store = (*cert_store).clone().into();
+                    TlsContextBuilder::with_supported_versions(supported_versions)
+                        .with_server_certificate_store(validation_context_secret_id, cert_store)
+                        .with_certificates(server_ids_and_certificates)
+                        .with_client_authentication(require_client_cert)
+                } else {
+                    let msg = format!("Can't find secret {secret_id} {validation_context_secret_id:?}",);
+                    debug!("{msg}");
+                    return Err(msg.into());
+                }
+            },
+        };
+
+        let new_config = new_builder.build()?;
+        Ok(TlsConfigurator { context_builder: new_builder, config: Arc::new(new_config) })
+    }
+}
+
+impl TryFrom<(TlsServerConfig, &SecretManager)> for TlsConfigurator<ServerConfig, WantsToBuildServer> {
+    type Error = crate::Error;
+    fn try_from((config, secret_manager): (TlsServerConfig, &SecretManager)) -> StdResult<Self, Self::Error> {
+        let require_client_cert = config.require_client_certificate;
+        let common_context = config.common_tls_context;
+        let supported_versions = common_context
+            .parameters
+            .supported_version()
+            .into_iter()
+            .map(|version| match version {
+                TlsVersion::TLSv1_2 => &TLS12,
+                TlsVersion::TLSv1_3 => &TLS13,
+            })
+            .collect();
+        debug!("DownstreamTlsContext : Selected TLS versions {supported_versions:?}");
+        let (certificate_store_secret_id, certificate_store) =
+            TlsConfigurator::create_certificate_store(secret_manager, common_context.validation_context)?;
+
+        let certs_and_secret_ids = match common_context.secrets {
+            Secrets::Certificates(certs) => {
+                let mut certs_and_secret_ids = vec![];
+                for certificate in certs {
+                    certs_and_secret_ids.push(SecretHolder::new(
+                        CompactString::default(),
+                        ServerCert::try_from(CertificateSecret::try_from(&certificate)?)?,
+                    ));
+                }
+                certs_and_secret_ids
+            },
+            Secrets::SdsConfig(sds) => {
+                let mut certs_and_secret_ids = vec![];
+                for sds_config_name in sds {
+                    if let Some(certificate) = secret_manager.get_certificate(&sds_config_name)? {
+                        let server_cert: ServerCert = certificate.try_into()?;
+                        let secret = SecretHolder::new(sds_config_name.clone(), server_cert);
+                        if certs_and_secret_ids.contains(&secret) {
+                            let msg = format!("DownstreamTlsContext : Duplicate secret name {}", &sds_config_name);
+                            warn!("{msg}");
+                            return Err(msg.into());
+                        }
+                        certs_and_secret_ids.push(secret);
+                    }
+                }
+                certs_and_secret_ids
+            },
+        };
+
+        let ctx_builder = TlsContextBuilder::with_supported_versions(supported_versions);
+        let ctx_builder = if let Some(certificate_store) = certificate_store {
+            ctx_builder.with_server_certificate_store(
+                certificate_store_secret_id.map(CompactString::into_string),
+                certificate_store,
+            )
+        } else {
+            ctx_builder.with_no_client_auth()
+        }
+        .with_certificates(certs_and_secret_ids)
+        .with_client_authentication(require_client_cert);
+
+        let config = ctx_builder.build()?;
+
+        Ok(TlsConfigurator::<ServerConfig, WantsToBuildServer> {
+            context_builder: ctx_builder,
+            config: Arc::new(config),
+        })
+    }
+}
+
+impl TryFrom<(TlsClientConfig, &SecretManager)> for TlsConfigurator<ClientConfig, WantsToBuildClient> {
+    type Error = crate::Error;
+
+    fn try_from((context, secret_manager): (TlsClientConfig, &SecretManager)) -> StdResult<Self, Self::Error> {
+        let sni = context.sni;
+        let supported_versions = context
+            .parameters
+            .supported_version()
+            .into_iter()
+            .map(|version| match version {
+                TlsVersion::TLSv1_2 => &TLS12,
+                TlsVersion::TLSv1_3 => &TLS13,
+            })
+            .collect();
+        debug!("UpstreamTlsContext : Selected TLS versions {supported_versions:?}");
+        let (certificate_store_secret_id, certificate_store) =
+            TlsConfigurator::create_certificate_store(secret_manager, context.validation_context)?;
+
+        let (secret_id, client_certificate) = match context.secret {
+            Some(TlsSecret::Certificate(cert)) => (None, Some(ClientCert::try_from(cert)?)),
+            Some(TlsSecret::SdsConfig(sds_config_name)) => {
+                let cert = secret_manager.get_certificate(&sds_config_name)?.map(ClientCert::try_from).transpose()?;
+                (Some(sds_config_name), cert)
+            },
+            None => (None, None),
+        };
+
+        let Some(certificate_store) = certificate_store else {
+            return Err("UpstreamContext : no TLS validation options found".into());
+        };
+        let ctx_builder = TlsContextBuilder::with_supported_versions(supported_versions).with_client_certificate_store(
+            certificate_store_secret_id.map(CompactString::into_string),
+            certificate_store,
+        );
+
+        let ctx_builder = if let Some(client_certificate) = client_certificate {
+            ctx_builder.with_client_certificate(secret_id.map(CompactString::into_string), Arc::new(client_certificate))
+        } else {
+            ctx_builder.with_no_client_auth()
+        }
+        .with_sni(sni.into_string());
+
+        let config = ctx_builder.build()?;
+
+        Ok(TlsConfigurator::<ClientConfig, WantsToBuildClient> {
+            context_builder: ctx_builder,
+            config: Arc::new(config),
+        })
+    }
+}
+
+impl TlsConfigurator<(), ()> {
+    /// Create a certificate store from the provided configuration options
+    ///
+    /// Returns an sds secret id and a TLS certificate store. If no validation context
+    /// is configured, returns `Ok((None, None))`.
+    fn create_certificate_store(
+        secret_manager: &SecretManager,
+        validation_options: Option<CommonTlsValidationContext>,
+    ) -> Result<(Option<CompactString>, Option<Arc<RootCertStore>>)> {
+        match validation_options {
+            Some(CommonTlsValidationContext::ValidationContext(validation_context)) => {
+                Ok((None, Some(CertStore::try_from(&validation_context)?.into())))
+            },
+            Some(CommonTlsValidationContext::SdsConfig(sds_config_name)) => {
+                if let Some(cert_store) = secret_manager.get_validation_context(&sds_config_name)? {
+                    Ok((Some(sds_config_name.clone()), Some(cert_store.try_into()?)))
+                } else {
+                    Ok((Some(sds_config_name.clone()), Some(Arc::new(RootCertStore::empty()))))
+                }
+            },
+            None => Ok((None, None)),
+        }
+    }
+}
+
+impl TlsConfigurator<ServerConfig, WantsToBuildServer> {
+    pub fn into_inner(self) -> ServerConfig {
+        (*self.config).clone()
+    }
+}
+
+impl TlsConfigurator<ClientConfig, WantsToBuildClient> {
+    pub fn into_inner(self) -> ClientConfig {
+        (*self.config).clone()
+    }
+    pub fn sni(&self) -> String {
+        self.context_builder.state.sni.clone()
+    }
+}
+
+/// A more relaxed version of ResolvesServerCertUsingSni,
+/// allowing any ServerName rather than only DNS names.
+#[derive(Debug)]
+pub struct RelaxedResolvesServerCertUsingSni {
+    by_name: HashMap<String, Arc<rustls::sign::CertifiedKey>>,
+}
+
+impl RelaxedResolvesServerCertUsingSni {
+    pub fn new() -> Self {
+        Self { by_name: HashMap::new() }
+    }
+
+    pub fn add(&mut self, name: &str, ck: rustls::sign::CertifiedKey) -> StdResult<(), rustls::Error> {
+        let name = name.to_ascii_lowercase();
+        let server_name =
+            ServerName::try_from(name).map_err(|_| rustls::Error::General("Bad Server/DNS name".into()))?;
+
+        ck.end_entity_cert()
+            .and_then(rustls::server::ParsedCertificate::try_from)
+            .and_then(|cert| rustls::client::verify_server_name(&cert, &server_name))?;
+
+        if let ServerName::DnsName(name) = server_name {
+            self.by_name.insert(name.as_ref().to_owned(), Arc::new(ck));
+        } else {
+            warn!("Server name is not a valid DNS name");
+        }
+        Ok(())
+    }
+}
+
+impl rustls::server::ResolvesServerCert for RelaxedResolvesServerCertUsingSni {
+    fn resolve(&self, client_hello: rustls::server::ClientHello) -> Option<Arc<rustls::sign::CertifiedKey>> {
+        if let Some(name) = client_hello.server_name() {
+            self.by_name.get(name).cloned()
+        } else {
+            // This kind of resolver requires SNI
+            None
+        }
+    }
+}
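+// Usage sketch (illustrative; `certified_key` would be built elsewhere from a ServerCert's
+// key and chain, and `server_config` is a rustls::ServerConfig under construction):
+//
+//     let mut resolver = RelaxedResolvesServerCertUsingSni::new();
+//     resolver.add("www.example.com", certified_key)?;
+//     server_config.cert_resolver = Arc::new(resolver);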
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::sync::Arc;
+
+use compact_str::CompactString;
+use rustls::{
+    client::WebPkiServerVerifier, server::WebPkiClientVerifier, sign::CertifiedKey, ClientConfig, RootCertStore,
+    ServerConfig, SupportedProtocolVersion,
+};
+use tracing::{debug, warn};
+
+use super::configurator::{get_crypto_key_provider, ClientCert, RelaxedResolvesServerCertUsingSni, ServerCert};
+
+#[derive(Debug, Clone)]
+pub struct WantsCertStore {
+    pub supported_versions: Vec<&'static SupportedProtocolVersion>,
+}
+
+#[derive(Debug, Clone)]
+pub struct WantsServerCert {
+    supported_versions: Vec<&'static SupportedProtocolVersion>,
+    validation_context_secret_id: Option<CompactString>,
+    certificate_store: Option<Arc<RootCertStore>>,
+}
+
+#[derive(Debug, Clone)]
+pub struct WantsClientCert {
+    supported_versions: Vec<&'static SupportedProtocolVersion>,
+    validation_context_secret_id: Option<CompactString>,
+    certificate_store: Arc<RootCertStore>,
+}
+
+#[derive(Debug, Clone)]
+pub struct SecretHolder {
+    pub name: CompactString,
+    pub server_cert: ServerCert,
+}
+
+impl PartialEq for SecretHolder {
+    fn eq(&self, other: &Self) -> bool {
+        self.name == other.name
+    }
+}
+
+impl Eq for SecretHolder {}
+
+impl PartialOrd for SecretHolder {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for SecretHolder {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.name.cmp(&other.name)
+    }
+}
+impl SecretHolder {
+    pub fn new(name: CompactString, server_cert: ServerCert) -> Self {
+        Self { name, server_cert }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct WantsToBuildServer {
+    pub supported_versions: Vec<&'static SupportedProtocolVersion>,
+    pub validation_context_secret_id: Option<CompactString>,
+    pub certificate_store: Option<Arc<RootCertStore>>,
+    pub server_ids_and_certificates: Vec<SecretHolder>,
+    pub require_client_cert: bool,
+}
+
+#[derive(Debug, Clone)]
+pub struct WantsToVerifyClientCert {
+    supported_versions: Vec<&'static SupportedProtocolVersion>,
+    validation_context_secret_id: Option<CompactString>,
+    certificate_store: Option<Arc<RootCertStore>>,
+    server_ids_and_certificates: Vec<SecretHolder>,
+}
+
+#[derive(Debug, Clone)]
+pub struct WantsSni {
+    supported_versions: Vec<&'static SupportedProtocolVersion>,
+    validation_context_secret_id: Option<CompactString>,
+    certificate_store: Arc<RootCertStore>,
+    certificate_secret_id: Option<CompactString>,
+    client_certificate: Option<Arc<ClientCert>>,
+}
+
+#[derive(Debug, Clone)]
+pub struct WantsToBuildClient {
+    pub supported_versions: Vec<&'static SupportedProtocolVersion>,
+    pub validation_context_secret_id: Option<CompactString>,
+    pub certificate_store: Arc<RootCertStore>,
+    pub certificate_secret_id: Option<CompactString>,
+    pub client_certificate: Option<Arc<ClientCert>>,
+    pub sni: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct TlsContextBuilder<S> {
+    pub state: S,
+}
+
+use crate::Result;
+
+impl TlsContextBuilder<()> {
+    pub fn with_supported_versions(
+        supported_versions: Vec<&'static SupportedProtocolVersion>,
+    ) -> TlsContextBuilder<WantsCertStore> {
+        TlsContextBuilder { state: WantsCertStore { supported_versions } }
+    }
+}
+
+impl TlsContextBuilder<WantsCertStore> {
+    pub fn with_server_certificate_store(
+        self,
+        secret_id: Option<CompactString>,
+        certificate_store: Arc<RootCertStore>,
+    ) -> TlsContextBuilder<WantsServerCert> {
+        let state = WantsServerCert {
+            supported_versions: self.state.supported_versions,
+            validation_context_secret_id: secret_id,
+            certificate_store: Some(certificate_store),
+        };
+        TlsContextBuilder { state }
+    }
+
+    pub fn with_no_client_auth(self) -> TlsContextBuilder<WantsServerCert> {
+        let state = WantsServerCert {
+            supported_versions: self.state.supported_versions,
+            validation_context_secret_id: None,
+            certificate_store: None,
+        };
+        TlsContextBuilder { state }
+    }
+
+    pub fn with_client_certificate_store(
+        self,
+        secret_id: Option<CompactString>,
+        certificate_store: Arc<RootCertStore>,
+    ) -> TlsContextBuilder<WantsClientCert> {
+        let state = WantsClientCert {
+            supported_versions: self.state.supported_versions,
+            validation_context_secret_id: secret_id,
+            certificate_store,
+        };
+        TlsContextBuilder { state }
+    }
+}
+
+impl TlsContextBuilder<WantsServerCert> {
+    pub fn with_certificates(
+        self,
+        server_ids_and_certificates: Vec<SecretHolder>,
+    ) -> TlsContextBuilder<WantsToVerifyClientCert> {
+        TlsContextBuilder {
+            state: WantsToVerifyClientCert {
+                supported_versions: self.state.supported_versions,
+                validation_context_secret_id: self.state.validation_context_secret_id,
+                certificate_store: self.state.certificate_store,
+                server_ids_and_certificates,
+            },
+        }
+    }
+}
+
+impl TlsContextBuilder<WantsToVerifyClientCert> {
+    pub fn with_client_authentication(self, require_client_cert: bool) -> TlsContextBuilder<WantsToBuildServer> {
+        TlsContextBuilder {
+            state: WantsToBuildServer {
+                supported_versions: self.state.supported_versions,
+                validation_context_secret_id: self.state.validation_context_secret_id,
+                certificate_store: self.state.certificate_store,
+                server_ids_and_certificates: self.state.server_ids_and_certificates,
+                require_client_cert,
+            },
+        }
+    }
+}
+
+impl TlsContextBuilder<WantsClientCert> {
+    pub fn with_client_certificate(
+        self,
+        secret_id: Option<CompactString>,
+        client_certificate: Arc<ClientCert>,
+    ) -> TlsContextBuilder<WantsSni> {
+        TlsContextBuilder {
+            state: WantsSni {
+                supported_versions: self.state.supported_versions,
+                certificate_store: self.state.certificate_store,
+                validation_context_secret_id: self.state.validation_context_secret_id,
+                client_certificate: Some(client_certificate),
+                certificate_secret_id: secret_id,
+            },
+        }
+    }
+    pub fn with_no_client_auth(self) -> TlsContextBuilder<WantsSni> {
+        TlsContextBuilder {
+            state: WantsSni {
+                supported_versions: self.state.supported_versions,
+                certificate_store: self.state.certificate_store,
+                validation_context_secret_id: self.state.validation_context_secret_id,
+                client_certificate: None,
+                certificate_secret_id: None,
+            },
+        }
+    }
+}
+
+impl TlsContextBuilder<WantsSni> {
+    pub fn with_sni(self, sni: String) -> TlsContextBuilder<WantsToBuildClient> {
+        TlsContextBuilder {
+            state: WantsToBuildClient {
+                supported_versions: self.state.supported_versions,
+                certificate_store: self.state.certificate_store,
+                validation_context_secret_id: self.state.validation_context_secret_id,
+                client_certificate: self.state.client_certificate,
+                certificate_secret_id: self.state.certificate_secret_id,
+                sni,
+            },
+        }
+    }
+}
+
+impl TlsContextBuilder<WantsToBuildServer> {
+    pub fn build(&self) -> Result<ServerConfig> {
+        let builder = ServerConfig::builder_with_protocol_versions(&self.state.supported_versions.clone());
+
+        let verifier = match (self.state.require_client_cert, &self.state.certificate_store) {
+            (true, None) => {
+                return Err("requireClientCertificate is true but no validation_context is configured".into())
+            },
+            (true, Some(certificate_store)) => {
+                Some(WebPkiClientVerifier::builder(Arc::clone(certificate_store)).build()?)
+            },
+            (false, Some(certificate_store)) => {
+                Some(WebPkiClientVerifier::builder(Arc::clone(certificate_store)).allow_unauthenticated().build()?)
+            },
+            (false, None) => None,
+        };
+
+        let builder = if let Some(verifier) = verifier {
+            builder.with_client_cert_verifier(verifier)
+        } else {
+            builder.with_no_client_auth()
+        };
+        let provider = get_crypto_key_provider()?;
+
+        if let [SecretHolder { name: _, server_cert: ServerCert { certs, key, name: _ } }] =
+            self.state.server_ids_and_certificates.as_slice()
+        {
+            // If only a single certificate exists, do not install SNI resolver, just accept all
+            // connections using the provided certificate
+            return Ok(builder.with_single_cert(certs.to_vec(), key.clone_key())?);
+        };
+
+        let mut resolver = RelaxedResolvesServerCertUsingSni::new();
+        let errors = self
+            .state
+            .server_ids_and_certificates
+            .iter()
+            .map(|SecretHolder { name: secret_name, server_cert: ServerCert { certs, key, name } }| {
+                provider
+                    .load_private_key(key.clone_key())
+                    .map(|private_key| {
+                        let certs = (**certs).clone();
+                        (secret_name, name, CertifiedKey::new(certs, private_key))
+                    })
+                    .map_err(|e| format!("DownstreamTlsContext: Can't load private key {secret_name} {name} - {e}").into())
+                    .inspect_err(|e| warn!("{e}"))
+            })
+            .filter_map(Result::ok)
+            .filter_map(|(secret_name, name, ck)| {
+                resolver
+                    .add(name, ck)
+                    .inspect_err(|e| {
+                        warn!("DownstreamTlsContext: Can't add certificate for secret '{secret_name}' {name} - {e}");
+                    })
+                    .err()
+            })
+            .count();
+        if errors > 0 {
+            Err(format!("Found {errors} errors in TLS context").into())
+        } else {
+            Ok(builder.with_cert_resolver(Arc::new(resolver)))
+        }
+    }
+}
+
+impl TlsContextBuilder<WantsToBuildClient> {
+    pub fn build(&self) -> Result<ClientConfig> {
+        let builder = ClientConfig::builder_with_protocol_versions(&self.state.supported_versions.clone());
+
+        let verifier = WebPkiServerVerifier::builder(Arc::clone(&self.state.certificate_store)).build()?;
+        let builder = builder.with_webpki_verifier(verifier);
+
+        if let Some(ClientCert { key, certs: auth_certs }) = self.state.client_certificate.as_deref() {
+            debug!("UpstreamContext : Selected Client Cert");
+            let certs: Vec<rustls::pki_types::CertificateDer<'static>> = auth_certs.as_ref().clone();
+            Ok(builder.with_client_auth_cert(certs, key.clone_key())?)
+        } else {
+            Ok(builder.with_no_client_auth())
+        }
+    }
+}
diff --git a/orion-lib/src/thread_local.rs b/orion-lib/src/thread_local.rs
new file mode 100644
index 00000000..dfe37ea3
--- /dev/null
+++ b/orion-lib/src/thread_local.rs
@@ -0,0 +1,57 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use thread_local::ThreadLocal;
+
+pub trait LocalBuilder<A, T> {
+    fn build(&self, arg: A) -> T;
+}
+
+/// Provides a thread-local instance of an object. The first time a thread requests
+/// its local copy through [LocalObject::get()], the object is constructed with the
+/// builder and argument provided in [LocalObject::new()]. Subsequent requests return
+/// a reference to the same thread-local object.
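+///
+/// A minimal usage sketch (the `CounterBuilder` type here is hypothetical and only
+/// for illustration):
+///
+/// ```ignore
+/// struct CounterBuilder;
+/// impl LocalBuilder<u64, u64> for CounterBuilder {
+///     fn build(&self, start: u64) -> u64 {
+///         start
+///     }
+/// }
+///
+/// let local = LocalObject::new(CounterBuilder, 0u64);
+/// assert_eq!(*local.get(), 0); // built once per thread, then reused
+/// ```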
+#[derive(Debug)]
+pub struct LocalObject<T, B, A>
+where
+    T: Sync + Send,
+    B: LocalBuilder<A, T>,
+    A: Clone,
+{
+    tls: ThreadLocal<T>,
+    builder: B,
+    arg: A,
+}
+
+impl<T, B, A> LocalObject<T, B, A>
+where
+    T: Sync + Send,
+    B: LocalBuilder<A, T>,
+    A: Clone,
+{
+    pub fn new(builder: B, arg: A) -> Self {
+        let tls = ThreadLocal::new();
+        LocalObject { tls, builder, arg }
+    }
+
+    pub fn get(&self) -> &T {
+        self.tls.get_or(|| self.builder.build(self.arg.clone()))
+    }
+}
diff --git a/orion-lib/src/transport/bind_device.rs b/orion-lib/src/transport/bind_device.rs
new file mode 100644
index 00000000..9a0b4868
--- /dev/null
+++ b/orion-lib/src/transport/bind_device.rs
@@ -0,0 +1,174 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub use orion_configuration::config::transport::BindDevice;
+
+#[cfg(target_os = "linux")]
+pub(crate) fn bind_device(s: &tokio::net::TcpSocket, binddev: &BindDevice) -> std::io::Result<()> {
+    let name = binddev.interface();
+    tracing::trace!("binding socket to dev {:?}", name);
+    s.bind_device(Some(name.to_bytes_with_nul()))
+}
+
+#[cfg(not(target_os = "linux"))]
+pub(crate) fn bind_device(_: &tokio::net::TcpSocket, _: &BindDevice) -> std::io::Result<()> {
+    Err(std::io::Error::new(std::io::ErrorKind::Other, "BINDTODEVICE is not supported"))
+}
+
+#[cfg(test)]
+mod tests {
+    use std::ffi::CStr;
+
+    use super::*;
+    use orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::{socket_option, SocketOption};
+
+    #[test]
+    fn envoy_bind_device_none() {
+        let opt = SocketOption {
+            description: String::new(),
+            level: 1,
+            name: 25,
+            state: 0,
+            value: None,
+            ..Default::default()
+        };
+        BindDevice::try_from(opt).unwrap_err();
+    }
+
+    #[test]
+    fn envoy_bind_device_eth0_no_null() {
+        let opt = SocketOption {
+            description: String::new(),
+            level: 1,
+            name: 25,
+            state: 0,
+            value: Some(socket_option::Value::BufValue("eth0".as_bytes().to_vec())),
+            ..Default::default()
+        };
+        let bind = BindDevice::try_from(opt).unwrap();
+        let expected = "eth0".parse().unwrap();
+        assert_eq!(bind, expected);
+    }
+
+    #[test]
+    fn envoy_bind_device_eth0_null() {
+        let opt = SocketOption {
+            description: String::new(),
+            level: 1,
+            name: 25,
+            state: 0,
+            value: Some(socket_option::Value::BufValue(b"eth0\0".to_vec())),
+            ..Default::default()
+        };
+        let bind = BindDevice::try_from(opt).unwrap();
+        let expected = "eth0".parse().unwrap();
+        assert_eq!(bind, expected);
+    }
+
+    #[test]
+    fn envoy_bind_device_bad_option() {
+        let opt =
+            SocketOption { description: String::new(), level: 1, name: 1, state: 0, value: None, ..Default::default() };
+        BindDevice::try_from(opt).unwrap_err();
+    }
+
+    #[test]
+    fn envoy_bind_device_too_long() {
+        let opt = SocketOption {
+            description: String::new(),
+            level: 1,
+            name: 25,
+            state: 0,
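+            // Linux caps interface names at IFNAMSIZ (16 bytes including the
+            // trailing NUL), so a 16-byte name must be rejected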
+            value: Some(socket_option::Value::BufValue(b"0123456789ABCDEF".to_vec())),
+            ..Default::default()
+        };
+        BindDevice::try_from(opt).unwrap_err();
+    }
+
+    #[test]
+    fn envoy_bind_invalid_null() {
+        let opt = SocketOption {
+            description: String::new(),
+            level: 1,
+            name: 25,
+            state: 0,
+            value: Some(socket_option::Value::BufValue(b"a\0b".to_vec())),
+            ..Default::default()
+        };
+        BindDevice::try_from(opt).unwrap_err();
+    }
+
+    #[test]
+    fn roundtrip_valid_binary() {
+        let iface = CStr::from_bytes_with_nul(b"a\x1b\0").unwrap();
+        let opt = SocketOption {
+            description: String::new(),
+            level: 1,
+            name: 25,
+            state: 0,
+            // example of a valid string taken from
+            // https://unix.stackexchange.com/a/677481
+            // note that spaces etc. are still disallowed by Linux but accepted here;
+            // that's not a huge issue, binding will just fail. What is important is that
+            // we accept binary values and round-trip them correctly
+            value: Some(socket_option::Value::BufValue(iface.to_bytes().to_owned())),
+            ..Default::default()
+        };
+        let bd = BindDevice::try_from(opt).unwrap();
+        assert_eq!(bd.interface(), iface);
+        let ng_string = serde_yaml::to_string(&bd).unwrap();
+        let bd: BindDevice = serde_yaml::from_str(&ng_string).unwrap();
+        assert_eq!(iface, bd.interface())
+    }
+
+    #[test]
+    fn roundtrip_valid_cstr() {
+        let iface = CStr::from_bytes_with_nul(b"eth0\0").unwrap();
+        let opt = SocketOption {
+            description: String::new(),
+            level: 1,
+            name: 25,
+            state: 0,
+            value: Some(socket_option::Value::BufValue(iface.to_bytes().to_owned())),
+            ..Default::default()
+        };
+        let bd = BindDevice::try_from(opt).unwrap();
+        assert_eq!(bd.interface(), iface);
+        let ng_string = serde_yaml::to_string(&bd).unwrap();
+        let bd: BindDevice = serde_yaml::from_str(&ng_string).unwrap();
+        assert_eq!(iface, bd.interface())
+    }
+
+    #[test]
+    fn direct_decode_bytes() {
+        let yaml = "interface_bytes: YRs=";
+        let iface = CStr::from_bytes_with_nul(b"a\x1b\0").unwrap();
+        let bd: BindDevice = serde_yaml::from_str(yaml).unwrap();
+        assert_eq!(iface, bd.interface())
+    }
+
+    #[test]
+    fn direct_decode_iface() {
+        let yaml = "interface: eth0";
+        let iface = CStr::from_bytes_with_nul(b"eth0\0").unwrap();
+        let bd: BindDevice = serde_yaml::from_str(yaml).unwrap();
+        assert_eq!(iface, bd.interface())
+    }
+}
diff --git a/orion-lib/src/transport/connector.rs b/orion-lib/src/transport/connector.rs
new file mode 100644
index 00000000..b4309e8d
--- /dev/null
+++ b/orion-lib/src/transport/connector.rs
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::{
+    future::Future,
+    io,
+    pin::Pin,
+    task::{self, Poll},
+    time::Duration,
+};
+
+use http::uri::Authority;
+use hyper::Uri;
+use hyper_util::rt::TokioIo;
+use pingora_timeout::fast_timeout::fast_timeout;
+use tokio::net::{TcpSocket, TcpStream};
+use tower::Service;
+use tracing::debug;
+
+use crate::clusters::retry_policy::EventError;
+
+use super::{bind_device::BindDevice, resolve};
+
+#[derive(Debug, thiserror::Error)]
+pub enum ConnectError {
+    #[error(transparent)]
+    Io(#[from] std::io::Error),
+    #[error(transparent)]
+    Event(#[from] EventError),
+}
+
+#[derive(Clone, Debug)]
+pub struct LocalConnectorWithDNSResolver {
+    pub addr: Authority,
+    pub bind_device: Option<BindDevice>,
+    pub timeout: Option<Duration>,
+}
+
+impl LocalConnectorWithDNSResolver {
+    pub fn connect(&self) -> impl Future<Output = Result<TcpStream, ConnectError>> + 'static {
+        let addr = self.addr.clone();
+        let device = self.bind_device.clone();
+        let connection_timeout = self.timeout;
+
+        async move {
+            let host = addr.host();
+            let port = addr
+                .port_u16()
+                .ok_or(io::Error::new(io::ErrorKind::AddrNotAvailable, format!("Port has to be set {addr:?}")))?;
+
+            let addr = resolve(host, port).await?;
+
+            let sock = match addr {
+                std::net::SocketAddr::V4(_) => TcpSocket::new_v4()?,
+                std::net::SocketAddr::V6(_) => TcpSocket::new_v6()?,
+            };
+
+            if let Some(device) = device {
+                // binding might succeed here but still fail later
+                // e.g. with an uncategorized error on connect
+                debug!("Binding socket to: {:?}", device);
+                super::bind_device::bind_device(&sock, &device)?;
+            }
+
+            let stream = if let Some(connection_timeout) = connection_timeout {
+                fast_timeout(connection_timeout, sock.connect(addr))
+                    .await
+                    .map_err(|_| EventError::ConnectTimeout)?
+                    .map_err(|_| EventError::ConnectFailure)?
+            } else {
+                sock.connect(addr).await.map_err(|_| EventError::ConnectFailure)?
+            };
+
+            Ok(stream)
+        }
+    }
+}
+
+impl Service<Uri> for LocalConnectorWithDNSResolver {
+    type Response = TokioIo<TcpStream>;
+    type Error = ConnectError;
+
+    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
+
+    fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+        // This connector is always ready, but others might not be.
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, _: Uri) -> Self::Future {
+        let f = self.connect();
+        Box::pin(async move { f.await.map(TokioIo::new) })
+    }
+}
diff --git a/orion-lib/src/transport/grpc_channel.rs b/orion-lib/src/transport/grpc_channel.rs
new file mode 100644
index 00000000..baa2e27a
--- /dev/null
+++ b/orion-lib/src/transport/grpc_channel.rs
@@ -0,0 +1,89 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use futures::future::BoxFuture;
+use futures::{FutureExt, TryFutureExt};
+use http::uri::{Authority, Scheme};
+use http::{Request, Uri};
+
+use orion_xds::grpc_deps::{to_grpc_body, GrpcBody};
+use tower::Service;
+
+use crate::listeners::http_connection_manager::RequestHandler;
+use crate::transport::request_context::RequestWithContext;
+use crate::transport::HttpChannel;
+
+/// Adapts a [`HttpChannel`] to a [`Service`] that can be used as a channel for gRPC.
+/// The inner value should be kept cheap to clone.
+#[derive(Clone, Debug)]
+pub struct GrpcService {
+    inner: HttpChannel,
+    scheme: Scheme,
+    authority: Authority,
+}
+
+impl GrpcService {
+    pub fn try_new(inner: HttpChannel, authority: Authority) -> Result<Self, crate::Error> {
+        let scheme = if inner.is_https() { Scheme::HTTPS } else { Scheme::HTTP };
+        if !inner.http_version().is_http2() {
+            return Err("gRPC endpoints need explicit HTTP 2".into());
+        }
+
+        Ok(GrpcService { inner, scheme, authority })
+    }
+}
+
+impl GrpcService {
+    async fn do_call(self, grpc_req: Request<GrpcBody>) -> std::result::Result<http::Response<GrpcBody>, crate::Error> {
+        let (mut parts, grpc_body) = grpc_req.into_parts();
+
+        // Add scheme and authority to gRPC URLs to make them valid HTTP
+        let mut uri_parts = parts.uri.clone().into_parts();
+        uri_parts.scheme = Some(self.scheme.clone());
+        uri_parts.authority = Some(self.authority.clone());
+        parts.uri = Uri::from_parts(uri_parts)?;
+
+        let http_req = Request::from_parts(parts, grpc_body.into());
+
+        let svc_resp = self.inner.to_response(RequestWithContext::new(http_req)).await?;
+        Ok(svc_resp.map(to_grpc_body))
+    }
+}
+
+impl Service<Request<GrpcBody>> for GrpcService {
+    type Response = http::Response<GrpcBody>;
+    type Error = orion_xds::grpc_deps::Error;
+    type Future = BoxFuture<'static, std::result::Result<Self::Response, Self::Error>>;
+
+    fn poll_ready(
+        &mut self,
+        _cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<std::result::Result<(), Self::Error>> {
+        // HttpService doesn't have poll_ready()
+        std::task::Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, grpc_req: Request<GrpcBody>) -> Self::Future {
+        self.clone()
+            .do_call(grpc_req)
+            .map_err(|e| Box::new(crate::Error::inner(e)) as orion_xds::grpc_deps::Error)
+            .boxed()
+    }
+}
diff --git a/orion-lib/src/transport/http_channel.rs b/orion-lib/src/transport/http_channel.rs
new file mode 100644
index 00000000..76f17870
--- /dev/null
+++ b/orion-lib/src/transport/http_channel.rs
@@ -0,0 +1,489 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use super::{bind_device::BindDevice, request_context::RequestWithContext};
+use super::{connector::LocalConnectorWithDNSResolver, request_context::RequestContext};
+use crate::listeners::http_connection_manager::RequestHandler;
+use crate::HttpBody;
+use crate::{
+    body::timeout_body::TimeoutBody,
+    clusters::retry_policy::{should_retry, EventError, FailureKind},
+    listeners::synthetic_http_response::SyntheticHttpResponse,
+    thread_local::{LocalBuilder, LocalObject},
+};
+use crate::{
+    secrets::{TlsConfigurator, WantsToBuildClient},
+    utils::TokioExecutor,
+    Error, Result,
+};
+use http::Version;
+use http::{
+    uri::{Authority, Parts},
+    HeaderValue, Response,
+};
+use http_body_util::BodyExt;
+use hyper::{body::Incoming, Request, Uri};
+use hyper_rustls::{FixedServerNameResolver, HttpsConnector};
+use hyper_util::client::legacy::connect::Connect;
+use hyper_util::client::legacy::{Builder, Client};
+use hyper_util::rt::tokio::TokioTimer;
+use orion_configuration::config::{
+    cluster::http_protocol_options::{Codec, HttpProtocolOptions},
+    network_filters::http_connection_manager::RetryPolicy,
+};
+use pingora_timeout::fast_timeout::fast_timeout;
+use pretty_duration::pretty_duration;
+use rustls::ClientConfig;
+use std::{
+    io::ErrorKind,
+    mem,
+    result::Result as StdResult,
+    sync::Arc,
+    time::{Duration, Instant},
+};
+use tracing::debug;
+use webpki::types::ServerName;
+
+type IncomingResult = (std::result::Result<Response<Incoming>, Error>, Duration);
+
+type HttpClient = Client<LocalConnectorWithDNSResolver, HttpBody>;
+type HttpsClient = Client<HttpsConnector<LocalConnectorWithDNSResolver>, HttpBody>;
+
+// Rationale: The outer Arc is necessary to avoid building a new Client when cloning the HttpChannel.
+// The inner Arc, instead, is used to pass the client to async code, so it's already wrapped by the Arc.
+
+#[derive(Clone, Debug)]
+pub struct ClientContext {
+    configured_upstream_http_version: Codec,
+    client: Arc<LocalObject<Arc<HttpsClient>, Builder, HttpsConnector<LocalConnectorWithDNSResolver>>>,
+}
+impl ClientContext {
+    fn new(
+        configured_upstream_http_version: Codec,
+        client: Arc<LocalObject<Arc<HttpsClient>, Builder, HttpsConnector<LocalConnectorWithDNSResolver>>>,
+    ) -> Self {
+        Self { configured_upstream_http_version, client }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct HttpChannel {
+    client: HttpChannelClient,
+    http_version: Codec,
+}
+
+#[derive(Clone, Debug)]
+pub enum HttpChannelClient {
+    Plain(Arc<LocalObject<Arc<HttpClient>, Builder, LocalConnectorWithDNSResolver>>),
+    Tls(ClientContext),
+}
+
+pub struct HttpChannelBuilder {
+    tls: Option<TlsConfigurator<ClientConfig, WantsToBuildClient>>,
+    authority: Option<Authority>,
+    bind_device: Option<BindDevice>,
+    server_name: Option<ServerName<'static>>,
+    http_protocol_options: HttpProtocolOptions,
+    connection_timeout: Option<Duration>,
+}
+
+impl LocalBuilder<LocalConnectorWithDNSResolver, Arc<HttpClient>> for Builder {
+    fn build(&self, arg: LocalConnectorWithDNSResolver) -> Arc<HttpClient> {
+        Arc::new(self.build(arg))
+    }
+}
+
+impl LocalBuilder<HttpsConnector<LocalConnectorWithDNSResolver>, Arc<HttpsClient>> for Builder {
+    fn build(&self, arg: HttpsConnector<LocalConnectorWithDNSResolver>) -> Arc<HttpsClient> {
+        Arc::new(self.build(arg))
+    }
+}
+
+impl HttpChannelBuilder {
+    pub fn new(bind_device: Option<BindDevice>) -> Self {
+        Self {
+            tls: None,
+            authority: None,
+            bind_device,
+            http_protocol_options: Default::default(),
+            server_name: None,
+            connection_timeout: None,
+        }
+    }
+
+    pub fn with_tls(self, tls_configurator: TlsConfigurator<ClientConfig, WantsToBuildClient>) -> Self {
+        Self { tls: Some(tls_configurator), ..self }
+    }
+
+    pub fn with_timeout(self, timeout: Option<Duration>) -> Self {
+        Self { connection_timeout: timeout, ..self }
+    }
+
+    pub fn with_authority(self, authority: Authority) -> Self {
+        Self { authority: Some(authority), ..self }
+    }
+
+    pub fn with_server_name(self, server_name: ServerName<'static>) -> Self {
+        Self { server_name: Some(server_name), ..self }
+    }
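+    /// Sets the HTTP protocol options applied when the channel is built:
+    /// the upstream codec (HTTP/1.1 or HTTP/2), idle timeouts and, for HTTP/2,
+    /// keep-alive and window-size tuning.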
+    pub fn with_http_protocol_options(self, http_protocol_options: HttpProtocolOptions) -> Self {
+        Self { http_protocol_options, ..self }
+    }
+
+    #[allow(clippy::cast_sign_loss)]
+    pub fn build(self) -> crate::Result<HttpChannel> {
+        let authority = self.authority.clone().ok_or("Authority is mandatory")?;
+        let mut client_builder = Client::builder(TokioExecutor);
+        client_builder.timer(TokioTimer::new());
+        // note: the legacy client Builder is not a consuming builder; its setters take and return &mut Self
+        client_builder
+            // Set an optional timeout for idle sockets being kept-alive. A Timer is required for this to take effect.
+            .pool_idle_timeout(
+                self.http_protocol_options.common.idle_timeout.unwrap_or(std::time::Duration::from_secs(30)),
+            )
+            // Pass a timer for the timeout...
+            .pool_timer(TokioTimer::new())
+            .pool_max_idle_per_host(usize::MAX)
+            .set_host(false);
+
+        let configured_upstream_http_version = self.http_protocol_options.codec;
+
+        if matches!(configured_upstream_http_version, Codec::Http2) {
+            client_builder.http2_only(true);
+            let http2_options = self.http_protocol_options.http2_options;
+            if let Some(settings) = &http2_options.keep_alive_settings {
+                client_builder.http2_keep_alive_interval(settings.keep_alive_interval);
+                if let Some(timeout) = settings.keep_alive_timeout {
+                    client_builder.http2_keep_alive_timeout(timeout);
+                };
+                client_builder.http2_keep_alive_while_idle(true);
+            }
+            client_builder.http2_initial_connection_window_size(http2_options.initial_connection_window_size());
+            client_builder.http2_initial_stream_window_size(http2_options.initial_stream_window_size());
+            //fixme(hayley): this is not max_concurrent_streams! this is reset streams
+            if let Some(max) = http2_options.max_concurrent_streams() {
+                client_builder.http2_max_concurrent_reset_streams(max);
+            }
+        }
+
+        if let Some(tls_context) = self.tls {
+            let builder = hyper_rustls::HttpsConnectorBuilder::new();
+            let builder = builder.with_tls_config(tls_context.into_inner());
+            let builder = builder.https_or_http();
+            let builder = if let Some(server_name) = self.server_name {
+                builder.with_server_name_resolver(FixedServerNameResolver::new(server_name))
+            } else {
+                let server_name = ServerName::try_from(authority.host().to_owned())?;
+                debug!("Server name is not configured in bootstrap, using endpoint authority {:?}", server_name);
+                builder.with_server_name_resolver(FixedServerNameResolver::new(server_name))
+            };
+            let tls_connector = match self.http_protocol_options.codec {
+                Codec::Http2 => builder.enable_http2().wrap_connector(LocalConnectorWithDNSResolver {
+                    addr: authority,
+                    bind_device: self.bind_device,
+                    timeout: self.connection_timeout,
+                }),
+
+                Codec::Http1 => builder.enable_http1().wrap_connector(LocalConnectorWithDNSResolver {
+                    addr: authority,
+                    bind_device: self.bind_device,
+                    timeout: self.connection_timeout,
+                }),
+            };
+            Ok(HttpChannel {
+                client: HttpChannelClient::Tls(ClientContext::new(
+                    configured_upstream_http_version,
+                    Arc::new(LocalObject::new(client_builder, tls_connector)),
+                )),
+                http_version: configured_upstream_http_version,
+            })
+        } else {
+            let arg = LocalConnectorWithDNSResolver {
+                addr: authority,
+                bind_device: self.bind_device,
+                timeout: self.connection_timeout,
+            };
+
+            Ok(HttpChannel {
+                client: HttpChannelClient::Plain(Arc::new(LocalObject::new(client_builder, arg))),
+                http_version: configured_upstream_http_version,
+            })
+        }
+    }
+}
+
+impl<'a> RequestHandler<RequestWithContext<'a, HttpBody>> for &HttpChannel {
+    async fn to_response(self, request: RequestWithContext<'a, HttpBody>) -> Result<Response<HttpBody>> {
+        let version = request.req.version();
+
+        match &self.client {
+            HttpChannelClient::Plain(sender) => {
+                let RequestContext { route_timeout, retry_policy } = request.ctx.clone();
+                let sender = Arc::clone(sender.get());
+                let req = maybe_normalize_uri(request.req, false)?;
+
+                let result = if let Some(t) = route_timeout {
+                    match fast_timeout(t, HttpChannel::send_request(retry_policy.as_deref(), sender, req)).await {
+                        Ok(result) => result,
+                        Err(_) => (Err(EventError::RouteTimeout.into()), t),
+                    }
+                } else {
+                    HttpChannel::send_request(retry_policy.as_deref(), sender, req).await
+                };
+                HttpChannel::handle_response(result, route_timeout, version)
+            },
+            HttpChannelClient::Tls(context) => {
+                let ClientContext { configured_upstream_http_version, client: sender } = context;
+                let RequestContext { route_timeout, retry_policy } = request.ctx.clone();
+                let configured_version = *configured_upstream_http_version;
+                let sender = Arc::clone(sender.get());
+
+                //FIXME(hayley): apply http protocol translation for plaintext too
+                debug!("Using TLS incoming http {version:?} configured {configured_version:?}");
+                let req = maybe_normalize_uri(request.req, true)?;
+                let req = maybe_change_http_protocol_version(req, configured_version)?;
+                let result = if let Some(t) = route_timeout {
+                    match fast_timeout(t, HttpChannel::send_request(retry_policy.as_deref(), sender, req)).await {
+                        Ok(result) => result,
+                        Err(_) => (Err(EventError::RouteTimeout.into()), t),
+                    }
+                } else {
+                    HttpChannel::send_request(retry_policy.as_deref(), sender, req).await
+                };
+
+                HttpChannel::handle_response(result, route_timeout, version)
+            },
+        }
+    }
+}
+
+impl HttpChannel {
+    /// Send the request and return the Result, either the Response or an Error,
+    /// along with the time spent on possible retransmissions. Note: the returned
+    /// duration does not include the time spent receiving the Body of the Response.
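+    /// For retried requests, the duration covers every attempt plus the back-off
+    /// sleeps between them (see `send_with_retry` below).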
+    async fn send_request<C>(
+        retry_policy: Option<&RetryPolicy>,
+        sender: Arc<Client<C, HttpBody>>,
+        req: Request<HttpBody>,
+    ) -> (StdResult<Response<Incoming>, Error>, Duration)
+    where
+        C: Connect + Clone + Send + Sync + 'static,
+    {
+        if let Some(ref policy) = retry_policy {
+            if policy.is_retriable(&req) {
+                Self::send_with_retry(policy, sender, req).await
+            } else {
+                (sender.request(req).await.map_err(Error::from), Duration::default())
+            }
+        } else {
+            (sender.request(req).await.map_err(Error::from), Duration::default())
+        }
+    }
+
+    async fn send_with_retry<C>(
+        retry_policy: &RetryPolicy,
+        sender: Arc<Client<C, HttpBody>>,
+        req: Request<HttpBody>,
+    ) -> (StdResult<Response<Incoming>, Error>, Duration)
+    where
+        C: Connect + Clone + Send + Sync + 'static,
+    {
+        let (parts, body) = req.into_parts();
+        let body = match body.collect().await {
+            Ok(body) => body,
+            Err(e) => {
+                return (Err(e.into()), Duration::default());
+            },
+        };
+
+        let body = http_body_util::Full::new(body.to_bytes());
+        let start_time = Instant::now();
+
+        for (index, back_off) in retry_policy.exponential_back_off().iter().enumerate() {
+            let cloned_body = body.clone().into();
+
+            let cloned_req: Request<HttpBody> = Request::from_parts(parts.clone(), cloned_body);
+
+            // actually send the request and wait for the response...
+
+            let result: StdResult<Response<Incoming>, Error> = if let Some(t) = retry_policy.per_try_timeout() {
+                match fast_timeout(t, sender.request(cloned_req)).await.map_err(|_| EventError::PerTryTimeout) {
+                    Ok(result) => result.map_err(Into::into),
+                    Err(err) => Err(err.into()),
+                }
+            } else {
+                sender.request(cloned_req).await.map_err(Into::into)
+            };
+
+            // generate a possible failure event...
+
+            let Some(failure) = FailureKind::try_infer(&result) else {
+                return (result, start_time.elapsed());
+            };
+
+            // check for a possible retry...
+
+            if !should_retry(retry_policy, &failure) {
+                return (result, start_time.elapsed());
+            }
+
+            // take an exponential back off break and retry...
+
+            if index < retry_policy.num_retries() as usize {
+                debug!(
+                    "retry_policy: retrying request #{}/{} in {}...",
+                    index + 1,
+                    retry_policy.num_retries(),
+                    pretty_duration(&back_off, None)
+                );
+
+                tokio::time::sleep(back_off).await;
+            }
+        }
+
+        let result = Err(std::io::Error::new(ErrorKind::InvalidData, "invalid retry_policy configuration").into());
+        (result, start_time.elapsed())
+    }
+
+    fn handle_response(
+        result: IncomingResult,
+        route_timeout: Option<Duration>,
+        version: http::Version,
+    ) -> StdResult<Response<HttpBody>, Error> {
+        match result {
+            (Ok(response), elapsed) => {
+                // calculate the remaining timeout (relative to the route timeout) for receiving
+                // the body of the incoming response...
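+                // e.g. with a 10s route timeout and 2s spent on the request phase,
+                // 8s remain for the body; if the request phase already overran the
+                // timeout, checked_sub saturates the residual to zero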
+                if let Some(residual_timeout) = route_timeout.map(|dur| dur.checked_sub(elapsed).unwrap_or_default()) {
+                    // set the residual_timeout on the body of the Response
+                    let (parts, body) = response.into_parts();
+                    Ok(Response::from_parts(parts, TimeoutBody::new(Some(residual_timeout), body).into()))
+                } else {
+                    let (parts, body) = response.into_parts();
+                    Ok(Response::from_parts(parts, body.into()))
+                }
+            },
+            (Err(err), dur) => {
+                if let Some(FailureKind::Event(
+                    event @ (EventError::ConnectTimeout | EventError::PerTryTimeout | EventError::RouteTimeout),
+                )) = FailureKind::<Incoming>::try_infer_from_error(err.as_ref())
+                {
+                    debug!("Route: timeout ({event}) occurred after {:?}", pretty_duration(&dur, None));
+                    Ok(SyntheticHttpResponse::gateway_timeout().into_response(version))
+                } else {
+                    Err(err)
+                }
+            },
+        }
+    }
+
+    pub fn is_https(&self) -> bool {
+        match &self.client {
+            HttpChannelClient::Plain(_) => false,
+            HttpChannelClient::Tls(_) => true,
+        }
+    }
+
+    pub fn http_version(&self) -> Codec {
+        self.http_version
+    }
+
+    pub fn load(&self) -> u32 {
+        let load = match &self.client {
+            HttpChannelClient::Plain(sender) => Arc::strong_count(sender.get()),
+            HttpChannelClient::Tls(sender) => Arc::strong_count(sender.client.get()),
+        };
+        u32::try_from(load).unwrap_or(u32::MAX)
+    }
+}
+
+#[inline]
+fn is_absolute(uri: &Uri) -> bool {
+    uri.authority().is_some() && uri.scheme().is_some()
+}
+
+fn select_scheme(version: http::Version, is_tls: bool) -> Option<http::uri::Scheme> {
+    match (version, is_tls) {
+        (http::Version::HTTP_09 | http::Version::HTTP_10 | http::Version::HTTP_11, false) => {
+            Some(http::uri::Scheme::HTTP)
+        },
+        (http::Version::HTTP_09 | http::Version::HTTP_10 | http::Version::HTTP_11, true) => {
+            Some(http::uri::Scheme::HTTPS)
+        },
+        (http::Version::HTTP_2, _) => Some(http::uri::Scheme::HTTPS),
+        _ => None,
+    }
+}
+
+fn maybe_change_http_protocol_version(request: Request<HttpBody>, version: Codec) -> Result<Request<HttpBody>> {
+    let request = maybe_update_host(request, version)?;
+    Ok(maybe_rewrite_version(request, version))
+}
+
+fn maybe_rewrite_version(mut request: Request<HttpBody>, version: Codec) -> Request<HttpBody> {
+    *request.version_mut() = match version {
+        Codec::Http1 => Version::HTTP_11,
+        Codec::Http2 => Version::HTTP_2,
+    };
+    request
+}
+
+fn maybe_update_host(mut request: Request<HttpBody>, version: Codec) -> Result<Request<HttpBody>> {
+    let request_version = request.version();
+    match (request_version, version) {
+        (Version::HTTP_11, Codec::Http2) => {
+            let headers = request.headers_mut();
+            headers.remove(http::header::HOST);
+        },
+        (Version::HTTP_2, Codec::Http1) => {
+            if let Some(authority) = request.uri().authority().cloned() {
+                debug!("Swapping authority/host (http2 -> http1)");
+                request.headers_mut().append(http::header::HOST, HeaderValue::from_str(authority.as_str())?);
+            }
+        },
+        (Version::HTTP_11, Codec::Http1) | (Version::HTTP_2, Codec::Http2) => {},
+        (v, _) => {
+            return Err(format!("Unsupported http version {v:?}").into());
+        },
+    }
+    Ok(request)
+}
+
+fn maybe_normalize_uri(mut request: Request<HttpBody>, is_tls: bool) -> crate::Result<Request<HttpBody>> {
+    let uri = request.uri();
+    if !is_absolute(uri) {
+        if let Some(host_header) = request.headers().get("host") {
+            let authority = host_header.to_str().map_err(|e| format!("Can't parse Host header {e:?}"))?;
+            let authority = authority.parse::<Authority>().map_err(|e| format!("Can't parse uri {e:?}"))?;
+
+            let version = request.version();
+            let uri = request.uri_mut();
+            let mut parts = Parts::from(mem::take(uri));
+            if parts.scheme.is_none() {
+                parts.scheme = select_scheme(version, is_tls);
+            }
+            parts.authority = Some(authority);
+            let new = Uri::from_parts(parts).map_err(|_| format!("Can't normalize uri: {uri}"))?;
+            *uri = new;
+        }
+    }
+    Ok(request)
+}
diff --git a/orion-lib/src/transport/mod.rs b/orion-lib/src/transport/mod.rs
new file mode 100644
index 00000000..ca829a56
--- /dev/null
+++ b/orion-lib/src/transport/mod.rs
@@ -0,0 +1,40 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use tokio::io::{AsyncRead, AsyncWrite};
+pub mod bind_device;
+pub mod connector;
+mod grpc_channel;
+mod http_channel;
+mod resolver;
+mod tcp_channel;
+pub use resolver::resolve;
+pub mod request_context;
+pub mod tls_inspector;
+pub use self::{
+    grpc_channel::GrpcService,
+    http_channel::{HttpChannel, HttpChannelBuilder},
+    tcp_channel::TcpChannel,
+};
+
+pub trait AsyncReadWrite: AsyncRead + AsyncWrite + Send + Sync + Unpin {}
+impl<T> AsyncReadWrite for T where T: AsyncRead + AsyncWrite + Send + Sync + Unpin {}
+
+pub type AsyncStream = Box<dyn AsyncReadWrite>;
diff --git a/orion-lib/src/transport/request_context.rs b/orion-lib/src/transport/request_context.rs
new file mode 100644
index 00000000..b4b2a8d3
--- /dev/null
+++ b/orion-lib/src/transport/request_context.rs
@@ -0,0 +1,45 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use http::Request;
+use http_body::Body;
+use orion_configuration::config::network_filters::http_connection_manager::RetryPolicy;
+use std::time::Duration;
+
+#[derive(Clone, Debug, Default)]
+pub struct RequestContext<'a> {
+    pub route_timeout: Option<Duration>,
+    pub retry_policy: Option<&'a RetryPolicy>,
+}
+
+pub struct RequestWithContext<'a, B: Body> {
+    pub req: Request<B>,
+    pub ctx: RequestContext<'a>,
+}
+
+impl<'a, B: Body> RequestWithContext<'a, B> {
+    pub fn new(req: Request<B>) -> Self {
+        RequestWithContext { req, ctx: RequestContext::default() }
+    }
+
+    pub fn with_context(req: Request<B>, ctx: RequestContext<'a>) -> Self {
+        RequestWithContext { req, ctx }
+    }
+}
diff --git a/orion-lib/src/transport/resolver.rs b/orion-lib/src/transport/resolver.rs
new file mode 100644
index 00000000..0ca1a560
--- /dev/null
+++ b/orion-lib/src/transport/resolver.rs
@@ -0,0 +1,60 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+// Based on
+// https://github.com/hickory-dns/hickory-dns/blob/v0.24.1/crates/resolver/examples/global_resolver.rs
+
+use std::{io, net::SocketAddr, sync::OnceLock};
+
+use hickory_resolver::{name_server::TokioConnectionProvider, TokioAsyncResolver};
+
+static GLOBAL_DNS_RESOLVER: OnceLock<TokioAsyncResolver> = OnceLock::new();
+
+pub async fn resolve(host: &str, port: u16) -> io::Result<SocketAddr> {
+    match GLOBAL_DNS_RESOLVER
+        .get_or_init(|| -> TokioAsyncResolver {
+            // The TokioAsyncResolver needs a Tokio runtime already running. By encapsulating the
+            // initialization of the OnceLock in this lambda function, we make sure that it is
+            // called from the parent `async` context, which 'weakly' guarantees a Tokio runtime is running.
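+            // (from_system_conf reads /etc/resolv.conf on Unix and, per hickory's docs,
+            // the registry on Windows)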
+
+            #[cfg(any(unix, windows))]
+            {
+                match TokioAsyncResolver::from_system_conf(TokioConnectionProvider::default()) {
+                    Ok(resolver) => resolver,
+                    Err(err) => panic!("Could not initialize the DNS resolver: {err}"),
+                }
+            }
+            #[cfg(not(any(unix, windows)))]
+            {
+                compile_error!("DNS resolver not implemented for this platform");
+            }
+        })
+        .lookup_ip(host)
+        .await
+        .map(|lookup_ip| lookup_ip.into_iter().next())
+    {
+        Ok(Some(ip)) => Ok(SocketAddr::new(ip, port)),
+        Ok(None) => Err(io::Error::new(
+            io::ErrorKind::AddrNotAvailable,
+            format!("dns resolution error for {host}: no ip found"),
+        )),
+        Err(e) => Err(io::Error::new(io::ErrorKind::AddrNotAvailable, format!("dns resolution error for {host}: {e}"))),
+    }
+}
diff --git a/orion-lib/src/transport/tcp_channel.rs b/orion-lib/src/transport/tcp_channel.rs
new file mode 100644
index 00000000..82c9b9c6
--- /dev/null
+++ b/orion-lib/src/transport/tcp_channel.rs
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::time::Duration;
+
+use super::bind_device::BindDevice;
+use super::connector::{ConnectError, LocalConnectorWithDNSResolver};
+use futures::future::BoxFuture;
+use http::uri::Authority;
+use tokio::net::TcpStream;
+
+#[derive(Debug, Clone)]
+pub struct TcpChannel {
+    connector: LocalConnectorWithDNSResolver,
+}
+
+impl TcpChannel {
+    pub fn new(authority: &Authority, bind_device: Option<BindDevice>, timeout: Option<Duration>) -> Self {
+        Self { connector: LocalConnectorWithDNSResolver { addr: authority.clone(), bind_device, timeout } }
+    }
+
+    pub fn connect(&self) -> BoxFuture<'static, std::result::Result<TcpStream, ConnectError>> {
+        Box::pin(self.connector.connect())
+    }
+}
diff --git a/orion-lib/src/transport/tls_inspector.rs b/orion-lib/src/transport/tls_inspector.rs
new file mode 100644
index 00000000..68610379
--- /dev/null
+++ b/orion-lib/src/transport/tls_inspector.rs
@@ -0,0 +1,115 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use rustls::server::Acceptor;
+use std::{pin::Pin, task::Poll};
+use tokio::{
+    io::{AsyncRead, AsyncWrite},
+    net::TcpStream,
+};
+
+pub struct TlsInspector<'a> {
+    stream: &'a mut TcpStream,
+    bytes_read: usize,
+    buffer: Vec<u8>,
+}
+
+impl<'a> TlsInspector<'a> {
+    fn new(stream: &'a mut TcpStream) -> Self {
+        Self { stream, bytes_read: 0, buffer: Vec::with_capacity(0) }
+    }
+    pub async fn peek_sni(stream: &'a mut TcpStream) -> Option<String> {
+        // we discard any errors here to simplify the code.
+        // the tls inspector might fail to find a handshake if we don't configure TLS for the listener,
+        // or it might fail because of some other spurious IO error.
+        // in the former case, we want to continue on as normal without SNI, while in the latter case
+        // (which should be rare) we will fail the connection later anyways.
+        let handshake = tokio_rustls::LazyConfigAcceptor::new(Acceptor::default(), Self::new(stream)).await.ok()?;
+        handshake.client_hello().server_name().map(String::from)
+    }
+}
+
+impl<'a> AsyncRead for TlsInspector<'a> {
+    fn poll_read(
+        self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+        buf: &mut tokio::io::ReadBuf<'_>,
+    ) -> std::task::Poll<std::io::Result<()>> {
+        let Self { stream, bytes_read, buffer } = Pin::into_inner(self);
+        // on the first peek, we can attempt to peek directly into the provided buffer as an optimization
+        if *bytes_read == 0 {
+            let poll = Pin::new(stream).poll_peek(cx, buf);
+            if let Poll::Ready(Ok(n_bytes)) = poll {
+                *bytes_read = n_bytes;
+                Poll::Ready(Ok(()))
+            } else {
+                poll.map(|p| p.map(|_| ()))
+            }
+        } else {
+            // if we have little space left in the buffer, grow it.
+            // maximum size should be capped by rustls failing the handshake.
+            if buffer.len().checked_sub(*bytes_read).unwrap_or_default() <= 512 {
+                buffer.resize(buffer.len() + 4 * (1 << 10), 0);
+            }
+            let mut peek_read_buf = tokio::io::ReadBuf::new(&mut buffer[..buf.remaining()]);
+            let poll = Pin::new(stream).poll_peek(cx, &mut peek_read_buf);
+            if let Poll::Ready(Ok(n_bytes)) = poll {
+                // this should never fail, as that would imply we peeked fewer bytes than we did previously
+                if n_bytes < *bytes_read {
+                    return Poll::Ready(Err(std::io::Error::other(
+                        "TLS inspector peeked fewer bytes than it did in a previous iteration",
+                    )));
+                }
+                let newly_read = &peek_read_buf.filled()[*bytes_read..];
+                buf.put_slice(newly_read);
+                *bytes_read = n_bytes;
+                Poll::Ready(Ok(()))
+            } else {
+                poll.map(|p| p.map(|_| ()))
+            }
+        }
+    }
+}
+
+// the rustls implementation requires we implement write, but we don't want to write here yet
+// ideally we would refactor this code in such a way that we simply return the result of the tls handshake
+// and continue from there instead of peeking
+//
+// for now, we simply error out on any writes.
+// The TLS protocol should not require that we write anything before receiving the initial handshake;
+// see https://tls13.xargs.org/
+impl<'a> AsyncWrite for TlsInspector<'a> {
+    fn poll_flush(
+        self: Pin<&mut Self>,
+        _: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Result<(), std::io::Error>> {
+        std::task::Poll::Ready(Err(std::io::Error::other("TLS inspector tried to write to read-only stream")))
+    }
+    fn poll_write(
+        self: Pin<&mut Self>,
+        _: &mut std::task::Context<'_>,
+        _: &[u8],
+    ) -> std::task::Poll<Result<usize, std::io::Error>> {
+        std::task::Poll::Ready(Err(std::io::Error::other("TLS inspector tried to write to read-only stream")))
+    }
+    fn poll_shutdown(
+        self: Pin<&mut Self>,
+        _: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Result<(), std::io::Error>> {
+        std::task::Poll::Ready(Err(std::io::Error::other("TLS inspector tried to write to read-only stream")))
+    }
+}
diff --git a/orion-lib/src/utils.rs b/orion-lib/src/utils.rs
new file mode 100644
index 00000000..617157bc
--- /dev/null
+++ b/orion-lib/src/utils.rs
@@ -0,0 +1,35 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use futures::Future;
+use hyper::rt::Executor;
+
+#[derive(Clone)]
+pub struct TokioExecutor;
+
+impl<F> Executor<F> for TokioExecutor
+where
+    F: Future + Send + 'static,
+    F::Output: Send + 'static,
+{
+    fn execute(&self, future: F) {
+        tokio::spawn(future);
+    }
+}
diff --git a/orion-proxy/Cargo.toml b/orion-proxy/Cargo.toml
new file mode 100644
index 00000000..2d84b619
--- /dev/null
+++ b/orion-proxy/Cargo.toml
@@ -0,0 +1,55 @@
+[package]
+description = "next generation cloud native proxy"
+edition = "2021"
+name = "orion-proxy"
+version = "0.1.0"
+
+[[bin]]
+name = "orion"
+path = "src/main.rs"
+
+
+[features]
+console = ["dep:console-subscriber"]
+dhat-heap = ["dep:dhat"]
+jemalloc = ["dep:tikv-jemallocator"]
+
+[dependencies]
+
+dhat = { version = "0.3.3", optional = true }
+
+
+abort-on-drop.workspace = true
+futures.workspace = true
+num_cpus.workspace = true
+orion-configuration.workspace = true
+orion-error.workspace = true
+orion-lib.workspace = true
+orion-xds.workspace = true
+tokio.workspace = true
+tracing.workspace = true
+
+affinity = "0.1.2"
+console-subscriber = { version = "0.4.0", optional = true }
+regex = "1.10.2"
+serde = { version = "1.0", features = ["rc"] }
+tracing-appender = "0.2"
+tracing-subscriber = { workspace = true, features = [
+    "alloc",
+    "env-filter",
+    "fmt",
+    "registry",
+    "std",
+] }
+
+[target.'cfg(not(target_env = "msvc"))'.dependencies]
+tikv-jemallocator = { version = "0.6", optional = true }
+
+[target.'cfg(target_os = "linux")'.dependencies]
+caps = "0.5"
+
+[dev-dependencies]
+tracing-test.workspace = true
+
+[lints]
+workspace = true
diff --git a/orion-proxy/conf/demo/demo-dynamic.yaml b/orion-proxy/conf/demo/demo-dynamic.yaml
new file mode 100644
index 00000000..48ec0db4
--- /dev/null
+++
b/orion-proxy/conf/demo/demo-dynamic.yaml @@ -0,0 +1,454 @@ + +node: + id: "test-id" + +dynamic_resources: + ads_config: + # api_type: GRPC + # transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: xds_cluster + + + +static_resources: + secrets: + - name: demo_ca_validation + validation_context: + trusted_ca: + #filename: ./test_certs/demo/backend.cert.pem + inline_string: | + -----BEGIN CERTIFICATE----- + MIIBxzCCAXmgAwIBAgIUKnio9q6H+IJijQf1gnEL01XQ+lMwBQYDK2VwMEsxCzAJ + BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEaMBgG + A1UEAwwRQ0RFVCBUZXN0IEJhY2tlbmQwHhcNMjQwNzAzMTMzNDQyWhcNMjcwMzMw + MTMzNDQyWjBLMQswCQYDVQQGEwJJRTERMA8GA1UECAwITGVpbnN0ZXIxDTALBgNV + BAsMBENERVQxGjAYBgNVBAMMEUNERVQgVGVzdCBCYWNrZW5kMCowBQYDK2VwAyEA + P+79mMTuxSPtABHMnovmrY/AVbYIUGOpo47N/GUa2a2jbzBtMB0GA1UdDgQWBBQV + N3P97CCbKBRwUXhGrUkoDC1TrDAfBgNVHSMEGDAWgBQVN3P97CCbKBRwUXhGrUko + DC1TrDAgBgNVHREEGTAXgg9iYWNrZW5kLmV4YW1wbGWHBH8AAAEwCQYDVR0TBAIw + ADAFBgMrZXADQQCOw2Wt5G2s2zMSAZGANcG8//MuRgLy1jBxxM51jsn0dXTZqkOb + K3pmzOjCj2reNhm9mXfYHQCj36jeOAelmWcH + -----END CERTIFICATE----- + - name: cnpp1_tls_server + tls_certificate: + certificate_chain: + #filename: ./test_certs/demo/cnpp1.cert.pem + inline_string: | + -----BEGIN CERTIFICATE----- + MIIBxzCCAXmgAwIBAgIUeBx9bqEurioHO4fJD5z8nMIAOZwwBQYDK2VwMEwxCzAJ + BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEbMBkG + A1UEAwwSQ0RFVCBUZXN0IExpc3RlbmVyMB4XDTI0MDYyODEzMTAwOVoXDTI3MDMy + NTEzMTAwOVowTDELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ0wCwYD + VQQLDARDREVUMRswGQYDVQQDDBJDREVUIFRlc3QgTGlzdGVuZXIwKjAFBgMrZXAD + IQAY7TzlgF0tBNzc5eCWdmKEfwGwIbFMmWMCmwYpLUweSaNtMGswHQYDVR0OBBYE + FNAmLiDRcs964PIgpp0i/gFXq8OrMB8GA1UdIwQYMBaAFNAmLiDRcs964PIgpp0i + /gFXq8OrMA8GA1UdEwEB/wQFMAMBAf8wGAYDVR0RBBEwD4INY25wcDEuZXhhbXBs + ZTAFBgMrZXADQQCzYs3yqjDLSM/I7C6jy2jyeBtbMWqsFHWDe7MWJdlW8QSBwfhp + vehKrvWcb9brXnYa2A5m0ejbT4TVoWGlylMO + -----END CERTIFICATE----- + private_key: + #filename: ./test_certs/demo/listener.key.pem + inline_string: | + -----BEGIN PRIVATE KEY----- + MC4CAQAwBQYDK2VwBCIEIH9AJcray3rNikzp7oOobt9JsDSHNQyxcT/gHP0kvnAS + -----END PRIVATE KEY----- + + - name: cnpp2_tls_server + tls_certificate: + certificate_chain: + #filename: ./test_certs/demo/cnpp2.cert.pem + inline_string: | + -----BEGIN CERTIFICATE----- + MIIBxzCCAXmgAwIBAgIUK1fwYdxUatMwHY28/HicU0OdbSwwBQYDK2VwMEwxCzAJ + BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEbMBkG + A1UEAwwSQ0RFVCBUZXN0IExpc3RlbmVyMB4XDTI0MDcwMTEyMTQ0MFoXDTI3MDMy + ODEyMTQ0MFowTDELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ0wCwYD + VQQLDARDREVUMRswGQYDVQQDDBJDREVUIFRlc3QgTGlzdGVuZXIwKjAFBgMrZXAD + IQAY7TzlgF0tBNzc5eCWdmKEfwGwIbFMmWMCmwYpLUweSaNtMGswHQYDVR0OBBYE + FNAmLiDRcs964PIgpp0i/gFXq8OrMB8GA1UdIwQYMBaAFNAmLiDRcs964PIgpp0i + /gFXq8OrMA8GA1UdEwEB/wQFMAMBAf8wGAYDVR0RBBEwD4INY25wcDIuZXhhbXBs + ZTAFBgMrZXADQQB3lqqrqHme+KHmhOe3QMLcPQGnaffxQQ+lrbn13cGaFnKYVBO9 + s3iidmpvZwaAOMeibNd4Ew1zEQtlwOAvdLkH + -----END CERTIFICATE----- + private_key: + #filename: ./test_certs/demo/listener.key.pem + inline_string: | + -----BEGIN PRIVATE KEY----- + MC4CAQAwBQYDK2VwBCIEIH9AJcray3rNikzp7oOobt9JsDSHNQyxcT/gHP0kvnAS + -----END PRIVATE KEY----- + + listeners: + - name: listener_http + address: + socket_address: { address: 0.0.0.0, port_value: 8080 } + socket_options: + - description: "bind to interface client2-proxy" + level: 1 + name: 25 + buf_value: Y2xpZW50Mi1wcm94eQ== + filter_chains: + - name: filter_chain_http + filters: + - name: http_gateway + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + httpFilters: + - name: envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + disabled: true + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "1" + tokens_per_fill: "1" + fill_interval: 5s + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + codec_type: HTTP1 + route_config: + name: basic_http_route + virtual_hosts: + - name: backend_http + domains: ["*"] + routes: + - match: + prefix: "/cluster1/" + route: + cluster: cluster1 + - match: + prefix: "/cluster2/" + route: + cluster: cluster2 + - match: + prefix: "/by-ua/" + headers: + - name: "user-agent" + string_match: + safe_regex: + regex: ".*curl/.*" + route: + cluster: cluster1 + - match: + prefix: "/by-ua/" + route: + cluster: cluster2 + - match: + prefix: "/direct-response/" + direct_response: + status: "403" + body: + inline_string: "Access denied\n" + - match: + prefix: "/route-timeout-and-retry/" + route: # retry only works if server returns 404 + timeout: 5s + cluster: "cluster1" + retry_policy: + retry_on: "5xx" + #retry_on: "retriable-status-codes" + #retriable_status_codes: [ "404" ] + num_retries: "3" + retry_back_off: + base_interval: 1s + max_interval: 10s + - match: # this only triggers w/ a slow server + prefix: "/upstream-req-timeout/" + route: + cluster: "cluster1" + retry_policy: + retry_on: "connect-failure,reset" + num_retries: "3" + per_try_timeout: 1s + - match: + prefix: "/rewrite/" + route: + cluster: "cluster1" + prefix_rewrite: "/rewrite-path/" + - match: + prefix: "/random-cluster/" + route: + prefix_rewrite: "/" + weighted_clusters: + clusters: + - name: cluster1 + weight: "1" + - name: cluster2 + weight: "2" + - match: + prefix: "/redirect/" + redirect: + https_redirect: true + host_redirect: "www.google.com" + port_redirect: "443" + response_code: "TEMPORARY_REDIRECT" + path_redirect: "/search" + - match: + prefix: "/rate-limit/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.config.route.v3.FilterConfig + disabled: false + - match: + prefix: "/rate-limit-override/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "5" + tokens_per_fill: "5" + fill_interval: 60s + + - name: listener_https + address: + socket_address: { address: 0.0.0.0, port_value: 8443 } + socket_options: + - description: "bind to interface client1-proxy" + level: 1 + name: 25 + # utf8 interface name - bytes encoded as base64 + buf_value: Y2xpZW50MS1wcm94eQ== + listener_filters: # Required for SNI routing + - name: listener_https_1_tls_inspector + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector + enable_ja3_fingerprinting: false + filter_chains: + - name: filter_chain_https1 + filter_chain_match: + server_names: [cnpp1.example] + filters: + - name: https_gateway + typedConfig: + "@type": 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + httpFilters: + - name: envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + disabled: true + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "1" + tokens_per_fill: "1" + fill_interval: 5s + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + codec_type: HTTP1 + route_config: + name: basic_https_route + virtual_hosts: + - name: backend_https + domains: ["*"] + routes: + - match: + prefix: "/cluster1/" + route: + cluster: cluster1 + - match: + prefix: "/cluster2/" + route: + cluster: cluster2 + - match: + prefix: "/by-ua/" + headers: + - name: "user-agent" + string_match: + safe_regex: + regex: ".*curl/.*" + route: + cluster: cluster1 + - match: + prefix: "/by-ua/" + route: + cluster: cluster2 + - match: + prefix: "/direct-response/" + direct_response: + status: "403" + body: + inline_string: "Access denied\n" + - match: + prefix: "/route-timeout-and-retry/" + route: # retry only works if server returns 404 + timeout: 5s + cluster: "cluster1" + retry_policy: + retry_on: "5xx" + #retry_on: "retriable-status-codes" + #retriable_status_codes: [ "404" ] + num_retries: "3" + retry_back_off: + base_interval: 1s + max_interval: 10s + - match: # this only triggers w/ a slow server + prefix: "/upstream-req-timeout/" + route: + cluster: "cluster1" + retry_policy: + retry_on: "connect-failure,reset" + num_retries: "3" + per_try_timeout: 1s + - match: + prefix: "/rewrite/" + route: + cluster: "cluster1" + prefix_rewrite: "/rewrite-path/" + - match: + prefix: "/random-cluster/" + route: + prefix_rewrite: "/" + weighted_clusters: + clusters: + - name: cluster1 + weight: "1" + - name: cluster2 + weight: "2" + - match: + prefix: "/redirect/" + redirect: + https_redirect: true + host_redirect: "www.google.com" + port_redirect: "443" + response_code: "TEMPORARY_REDIRECT" + path_redirect: "/search" + - match: + prefix: "/rate-limit/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.config.route.v3.FilterConfig + disabled: false + - match: + prefix: "/rate-limit-override/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "5" + tokens_per_fill: "5" + fill_interval: 60s + + + transport_socket: + name: envoy.transport_sockets.tls + typedConfig: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificate_sds_secret_configs: + - name: cnpp1_tls_server + - name: filter_chain_https2 + filter_chain_match: # matches all hosts/sni + server_names: [] + filters: + - name: https_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + codec_type: HTTP1 + route_config: + 
name: basic_https_route + virtual_hosts: + - name: backend_https + domains: ["*"] + routes: + - match: + prefix: "/" + direct_response: + status: "200" + body: + inline_string: "This is the fallback SNI hostname route\n" + transport_socket: + name: envoy.transport_sockets.tls + typedConfig: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificate_sds_secret_configs: + - name: cnpp2_tls_server + + clusters: + - name: xds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 18000 + + - name: cluster1 + type: EDS + lb_policy: ROUND_ROBIN + upstream_bind_config: + socket_options: + - description: "bind to interface cluster1-proxy" + level: 1 + name: 25 + buf_value: Y2x1c3RlcjEtcHJveHk= + + - name: cluster2 + type: STATIC + connect_timeout: 2s + lb_policy: ROUND_ROBIN + upstream_bind_config: + # sudo setcap cap_net_raw=ep orion + socket_options: + - description: "bind to interface cluster2-proxy" + level: 1 + name: 25 + buf_value: Y2x1c3RlcjItcHJveHk= + + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: "backend.example" + common_tls_context: + validation_context_sds_secret_config: + name: demo_ca_validation + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + # force http2 + http2_protocol_options: {} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 192.0.0.1 + port_value: 6443 + load_balancing_weight: 1 + - endpoint: + address: + socket_address: + address: 192.0.0.1 + port_value: 6444 + load_balancing_weight: 1 diff --git a/orion-proxy/conf/demo/demo-static.yaml b/orion-proxy/conf/demo/demo-static.yaml new file mode 100644 index 00000000..a44c4f61 --- /dev/null +++ b/orion-proxy/conf/demo/demo-static.yaml @@ -0,0 +1,458 @@ +static_resources: + secrets: + - name: demo_ca_validation + validation_context: + trusted_ca: + #filename: ./test_certs/demo/backend.cert.pem + inline_string: | + -----BEGIN CERTIFICATE----- + MIIBxzCCAXmgAwIBAgIUKnio9q6H+IJijQf1gnEL01XQ+lMwBQYDK2VwMEsxCzAJ + BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEaMBgG + A1UEAwwRQ0RFVCBUZXN0IEJhY2tlbmQwHhcNMjQwNzAzMTMzNDQyWhcNMjcwMzMw + MTMzNDQyWjBLMQswCQYDVQQGEwJJRTERMA8GA1UECAwITGVpbnN0ZXIxDTALBgNV + BAsMBENERVQxGjAYBgNVBAMMEUNERVQgVGVzdCBCYWNrZW5kMCowBQYDK2VwAyEA + P+79mMTuxSPtABHMnovmrY/AVbYIUGOpo47N/GUa2a2jbzBtMB0GA1UdDgQWBBQV + N3P97CCbKBRwUXhGrUkoDC1TrDAfBgNVHSMEGDAWgBQVN3P97CCbKBRwUXhGrUko + DC1TrDAgBgNVHREEGTAXgg9iYWNrZW5kLmV4YW1wbGWHBH8AAAEwCQYDVR0TBAIw + ADAFBgMrZXADQQCOw2Wt5G2s2zMSAZGANcG8//MuRgLy1jBxxM51jsn0dXTZqkOb + K3pmzOjCj2reNhm9mXfYHQCj36jeOAelmWcH + -----END CERTIFICATE----- + - name: cnpp1_tls_server + tls_certificate: + certificate_chain: + #filename: ./test_certs/demo/cnpp1.cert.pem + inline_string: | + -----BEGIN CERTIFICATE----- + MIIBxzCCAXmgAwIBAgIUeBx9bqEurioHO4fJD5z8nMIAOZwwBQYDK2VwMEwxCzAJ + BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEbMBkG + A1UEAwwSQ0RFVCBUZXN0IExpc3RlbmVyMB4XDTI0MDYyODEzMTAwOVoXDTI3MDMy + NTEzMTAwOVowTDELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ0wCwYD + 
VQQLDARDREVUMRswGQYDVQQDDBJDREVUIFRlc3QgTGlzdGVuZXIwKjAFBgMrZXAD + IQAY7TzlgF0tBNzc5eCWdmKEfwGwIbFMmWMCmwYpLUweSaNtMGswHQYDVR0OBBYE + FNAmLiDRcs964PIgpp0i/gFXq8OrMB8GA1UdIwQYMBaAFNAmLiDRcs964PIgpp0i + /gFXq8OrMA8GA1UdEwEB/wQFMAMBAf8wGAYDVR0RBBEwD4INY25wcDEuZXhhbXBs + ZTAFBgMrZXADQQCzYs3yqjDLSM/I7C6jy2jyeBtbMWqsFHWDe7MWJdlW8QSBwfhp + vehKrvWcb9brXnYa2A5m0ejbT4TVoWGlylMO + -----END CERTIFICATE----- + private_key: + #filename: ./test_certs/demo/listener.key.pem + inline_string: | + -----BEGIN PRIVATE KEY----- + MC4CAQAwBQYDK2VwBCIEIH9AJcray3rNikzp7oOobt9JsDSHNQyxcT/gHP0kvnAS + -----END PRIVATE KEY----- + + - name: cnpp2_tls_server + tls_certificate: + certificate_chain: + #filename: ./test_certs/demo/cnpp2.cert.pem + inline_string: | + -----BEGIN CERTIFICATE----- + MIIBxzCCAXmgAwIBAgIUK1fwYdxUatMwHY28/HicU0OdbSwwBQYDK2VwMEwxCzAJ + BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEbMBkG + A1UEAwwSQ0RFVCBUZXN0IExpc3RlbmVyMB4XDTI0MDcwMTEyMTQ0MFoXDTI3MDMy + ODEyMTQ0MFowTDELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ0wCwYD + VQQLDARDREVUMRswGQYDVQQDDBJDREVUIFRlc3QgTGlzdGVuZXIwKjAFBgMrZXAD + IQAY7TzlgF0tBNzc5eCWdmKEfwGwIbFMmWMCmwYpLUweSaNtMGswHQYDVR0OBBYE + FNAmLiDRcs964PIgpp0i/gFXq8OrMB8GA1UdIwQYMBaAFNAmLiDRcs964PIgpp0i + /gFXq8OrMA8GA1UdEwEB/wQFMAMBAf8wGAYDVR0RBBEwD4INY25wcDIuZXhhbXBs + ZTAFBgMrZXADQQB3lqqrqHme+KHmhOe3QMLcPQGnaffxQQ+lrbn13cGaFnKYVBO9 + s3iidmpvZwaAOMeibNd4Ew1zEQtlwOAvdLkH + -----END CERTIFICATE----- + private_key: + #filename: ./test_certs/demo/listener.key.pem + inline_string: | + -----BEGIN PRIVATE KEY----- + MC4CAQAwBQYDK2VwBCIEIH9AJcray3rNikzp7oOobt9JsDSHNQyxcT/gHP0kvnAS + -----END PRIVATE KEY----- + + listeners: + - name: listener_http + address: + socket_address: { address: 0.0.0.0, port_value: 8080 } + socket_options: + - description: "bind to interface client2-proxy" + level: 1 + name: 25 + buf_value: Y2xpZW50Mi1wcm94eQ== + filter_chains: + - name: filter_chain_http + filters: + - name: http_gateway + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + httpFilters: + - name: envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + disabled: true + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "1" + tokens_per_fill: "1" + fill_interval: 5s + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + codec_type: HTTP1 + route_config: + name: basic_http_route + virtual_hosts: + - name: backend_http + domains: ["*"] + routes: + - match: + prefix: "/cluster1/" + route: + cluster: cluster1 + - match: + prefix: "/cluster2/" + route: + cluster: cluster2 + - match: + prefix: "/by-ua/" + headers: + - name: "user-agent" + string_match: + safe_regex: + regex: ".*curl/.*" + route: + cluster: cluster1 + - match: + prefix: "/by-ua/" + route: + cluster: cluster2 + - match: + prefix: "/direct-response/" + direct_response: + status: "403" + body: + inline_string: "Access denied\n" + - match: + prefix: "/route-timeout-and-retry/" + route: # retry only works if server returns 404 + timeout: 5s + cluster: "cluster1" + retry_policy: + retry_on: "5xx" + #retry_on: "retriable-status-codes" + #retriable_status_codes: [ "404" ] + num_retries: "3" + retry_back_off: + base_interval: 1s + max_interval: 10s + - match: # this only triggers w/ a slow server + prefix: 
"/upstream-req-timeout/" + route: + cluster: "cluster1" + retry_policy: + retry_on: "connect-failure,reset" + num_retries: "3" + per_try_timeout: 1s + - match: + prefix: "/rewrite/" + route: + cluster: "cluster1" + prefix_rewrite: "/rewrite-path/" + - match: + prefix: "/random-cluster/" + route: + prefix_rewrite: "/" + weighted_clusters: + clusters: + - name: cluster1 + weight: "1" + - name: cluster2 + weight: "2" + - match: + prefix: "/redirect/" + redirect: + https_redirect: true + host_redirect: "www.google.com" + port_redirect: "443" + response_code: "TEMPORARY_REDIRECT" + path_redirect: "/search" + - match: + prefix: "/rate-limit/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.config.route.v3.FilterConfig + disabled: false + - match: + prefix: "/rate-limit-override/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "5" + tokens_per_fill: "5" + fill_interval: 60s + + - name: listener_https + address: + socket_address: { address: 0.0.0.0, port_value: 8443 } + socket_options: + - description: "bind to interface client1-proxy" + level: 1 + name: 25 + # utf8 interface name - bytes encoded as base64 + buf_value: Y2xpZW50MS1wcm94eQ== + listener_filters: # Required for SNI routing + - name: listener_https_1_tls_inspector + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector + enable_ja3_fingerprinting: false + filter_chains: + - name: filter_chain_https1 + filter_chain_match: + server_names: [cnpp1.example] + filters: + - name: https_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + httpFilters: + - name: envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + disabled: true + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "1" + tokens_per_fill: "1" + fill_interval: 5s + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + codec_type: HTTP1 + route_config: + name: basic_https_route + virtual_hosts: + - name: backend_https + domains: ["*"] + routes: + - match: + prefix: "/cluster1/" + route: + cluster: cluster1 + - match: + prefix: "/cluster2/" + route: + cluster: cluster2 + - match: + prefix: "/by-ua/" + headers: + - name: "user-agent" + string_match: + safe_regex: + regex: ".*curl/.*" + route: + cluster: cluster1 + - match: + prefix: "/by-ua/" + route: + cluster: cluster2 + - match: + prefix: "/direct-response/" + direct_response: + status: "403" + body: + inline_string: "Access denied\n" + - match: + prefix: "/route-timeout-and-retry/" + route: # retry only works if server returns 404 + timeout: 5s + cluster: "cluster1" + retry_policy: + retry_on: "5xx" + #retry_on: "retriable-status-codes" + #retriable_status_codes: [ "404" ] + num_retries: "3" + retry_back_off: + base_interval: 1s + max_interval: 10s + - match: # this only triggers w/ a slow server + prefix: "/upstream-req-timeout/" + route: + cluster: "cluster1" + 
retry_policy: + retry_on: "connect-failure,reset" + num_retries: "3" + per_try_timeout: 1s + - match: + prefix: "/rewrite/" + route: + cluster: "cluster1" + prefix_rewrite: "/rewrite-path/" + - match: + prefix: "/random-cluster/" + route: + prefix_rewrite: "/" + weighted_clusters: + clusters: + - name: cluster1 + weight: "1" + - name: cluster2 + weight: "2" + - match: + prefix: "/redirect/" + redirect: + https_redirect: true + host_redirect: "www.google.com" + port_redirect: "443" + response_code: "TEMPORARY_REDIRECT" + path_redirect: "/search" + - match: + prefix: "/rate-limit/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.config.route.v3.FilterConfig + disabled: false + - match: + prefix: "/rate-limit-override/" + route: + cluster: "cluster1" + prefix_rewrite: "/" + typed_per_filter_config: + envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit: + "@type": type.googleapis.com/envoy.extensions.filters.http.local_ratelimit.v3.LocalRateLimit + stat_prefix: http_local_rate_limiter + token_bucket: + max_tokens: "5" + tokens_per_fill: "5" + fill_interval: 60s + + + transport_socket: + name: envoy.transport_sockets.tls + typedConfig: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificate_sds_secret_configs: + - name: cnpp1_tls_server + - name: filter_chain_https2 + filter_chain_match: # matches all hosts/sni + server_names: [] + filters: + - name: https_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + codec_type: HTTP1 + route_config: + name: basic_https_route + virtual_hosts: + - name: backend_https + domains: ["*"] + routes: + - match: + prefix: "/" + direct_response: + status: "200" + body: + inline_string: "This is the fallback SNI hostname route\n" + transport_socket: + name: envoy.transport_sockets.tls + typedConfig: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + tls_certificate_sds_secret_configs: + - name: cnpp2_tls_server + + clusters: + - name: cluster1 + type: STATIC + lb_policy: ROUND_ROBIN + + #health_checks: + #- timeout: 0.1s + # interval: 10s + # healthy_threshold: "2" + # unhealthy_threshold: "2" + # http_health_check: {} + + upstream_bind_config: + socket_options: + - description: "bind to interface cluster1-proxy" + level: 1 + name: 25 + buf_value: Y2x1c3RlcjEtcHJveHk= + + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + # force HTTP1 for the cluster + http_protocol_options: {} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 192.0.0.1 + port_value: 6080 + load_balancing_weight: 1 + - endpoint: + address: + socket_address: + address: 192.0.0.1 + port_value: 6081 + load_balancing_weight: 1 + + - name: cluster2 + type: STATIC + connect_timeout: 2s + lb_policy: ROUND_ROBIN + + upstream_bind_config: + # sudo setcap cap_net_raw=ep orion + socket_options: + - description: "bind to interface cluster2-proxy" + level: 1 + name: 25 
+ buf_value: Y2x1c3RlcjItcHJveHk= + + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: "backend.example" + common_tls_context: + validation_context_sds_secret_config: + name: demo_ca_validation + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + # force http2 + http2_protocol_options: {} + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 192.0.0.1 + port_value: 6443 + load_balancing_weight: 1 + - endpoint: + address: + socket_address: + address: 192.0.0.1 + port_value: 6444 + load_balancing_weight: 1 diff --git a/orion-proxy/conf/demo/nginx.conf b/orion-proxy/conf/demo/nginx.conf new file mode 100644 index 00000000..048b87df --- /dev/null +++ b/orion-proxy/conf/demo/nginx.conf @@ -0,0 +1,124 @@ +user www-data; +worker_processes auto; +pid /run/nginx.pid; +error_log /var/log/nginx/error.log info; +include /etc/nginx/modules-enabled/*.conf; + +events { + worker_connections 768; + # multi_accept on; +} + +http { + + sendfile on; + tcp_nopush on; + types_hash_max_size 2048; + # server_tokens off; + + # server_names_hash_bucket_size 64; + # server_name_in_redirect off; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ## + # SSL Settings + ## + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE + ssl_prefer_server_ciphers on; + + ## + # Logging Settings + ## + + log_format combinedwithport '[$time_local] ' + '"$request" $status ' + '"$http_user_agent" $server_port'; + + access_log /var/log/nginx/access.log combinedwithport; + + ## + # Gzip Settings + ## + + gzip on; + + # gzip_vary on; + # gzip_proxied any; + # gzip_comp_level 6; + # gzip_buffers 16 8k; + # gzip_http_version 1.1; + # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; + + ## + # Virtual Host Configs + ## + + #include /etc/nginx/conf.d/*.conf; + #include /etc/nginx/sites-enabled/*; + + server { + listen 6080 default_server; + listen 6081 default_server; + listen 6443 ssl http2 default_server; + listen 6444 ssl http2 default_server; + + root /var/www/html; + index index.html index.htm; + server_name backend.example; + + ssl_certificate backend.cert.pem; + ssl_certificate_key backend.key.pem; + + location / { + # this should match everything (else) w/ 200 + default_type text/plain; + return 200; + } + location /route-timeout-and-retry/ { + default_type text/plain; + return 500; + } + location /upstream-req-timeout/ { + default_type text/plain; + echo_sleep 10; + echo slow-resource-10s; + } + location /status/500/ { + default_type text/plain; + return 500; + } + location /status/404/ { + default_type text/plain; + return 404; + } + location /status/403/ { + default_type text/plain; + return 403; + } + } +} + + +#mail { +# # See sample authentication script at: +# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript +# +# # auth_http localhost/auth.php; +# # pop3_capabilities "TOP" "USER"; +# # imap_capabilities "IMAP4rev1" "UIDPLUS"; +# +# server { +# listen localhost:110; +# protocol pop3; +# proxy on; +# } +# +# server { +# listen localhost:143; +# protocol imap; +# proxy on; +# } +#} diff --git a/orion-proxy/conf/orion-runtime-health.yaml 
b/orion-proxy/conf/orion-runtime-health.yaml new file mode 100644 index 00000000..ab04a8c7 --- /dev/null +++ b/orion-proxy/conf/orion-runtime-health.yaml @@ -0,0 +1,165 @@ +# /proxy/ ---> (cluster1) 192.168.2.10:80 +# | +# http request ---> 192.168.1.1:8080 -| +# | +# (default) ---> (cluster2) 127.0.0.1:80 +# +# /proxy/ ---> (cluster3) 192.168.2.10:81 (h2) +# | +# http request ---> 192.168.1.1:8081 -| +# | +# / 404 +# + +runtime: + num_cpus: 1 + num_runtimes: 1 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + +logging: + log_level: "debug" + log_directory: "." + log_file: "orion.log" + +envoy_bootstrap: + static_resources: + listeners: + - name: listener_http + address: + socket_address: { address: 127.0.0.1, port_value: 8000 } + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1 + http_filters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + routeConfig: + name: basic_http_route + virtual_hosts: + - name: backend + domains: ["*"] + routes: + - match: + prefix: "/" + route: + cluster: cluster_http + - name: listener_tcp + address: + socket_address: { address: 127.0.0.1, port_value: 8001 } + filterChains: + - name: filter_chain_tcp + filter_chain_match: + destination_port: 8001 + filters: + - name: tcp_proxy_filter + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + statPrefix: ingress_tcp + cluster: cluster_tcp + + clusters: + - name: cluster_http + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + + + health_checks: + - timeout: 0.1s + interval: 5s + healthy_threshold: "3" + unhealthy_threshold: "2" + http_health_check: + path: /health + + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + + - name: cluster_http2 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: athlone_1.beefcake.com + common_tls_context: + tls_params: + tls_minimum_protocol_version: TLSv1_2 + tls_maximum_protocol_version: TLSv1_2 + tls_certificates: + - certificate_chain: + filename: ./test_certs/beefcakeCA-gathered/beefcake-athlone.cert.pem + private_key: + filename: ./test_certs/beefcakeCA-gathered/beefcake-athlone.key.pem + ## PROBLEM: depending on the configuration, in strict mode the client needs to respond with a certificate that matches the domain of the HTTP request coming from the downstream, + ## so if the client requests dublin_1 we need to respond with SAN dublin_1 or a wildcard... 
otherwise we need to disable the cert validation... or disable SNI in client config + validation_context: + trusted_ca: + filename: ./test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem + + health_checks: + - timeout: 0.1s + interval: 5s + healthy_threshold: "3" + unhealthy_threshold: "2" + http_health_check: + path: /health + codec_client_type: HTTP2 + + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4005 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4006 + + + diff --git a/orion-proxy/conf/orion-runtime-http.yaml b/orion-proxy/conf/orion-runtime-http.yaml new file mode 100644 index 00000000..0f1968c6 --- /dev/null +++ b/orion-proxy/conf/orion-runtime-http.yaml @@ -0,0 +1,103 @@ + +runtime: + num_cpus: 2 + num_runtimes: 2 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + +logging: + log_level: "debug" +# log_directory: "." +# log_file: "orion.log" + +envoy_bootstrap: + static_resources: + listeners: + - name: listener_http + address: + socket_address: { address: 0.0.0.0, port_value: 8000 } + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1 + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + routeConfig: + name: basic_http_route + virtual_hosts: + - name: backend + domains: ["*"] + routes: + - match: + prefix: "/direct-response" + headers: + - name: ":method" + exactMatch: "GET" + direct_response: + status: 200 + body: + inline_string: "meow! 🐱" + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "GET" + route: + cluster: cluster_http_1 + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "POST" + route: + cluster: cluster_http_2 + + clusters: + - name: cluster_http_1 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + + - name: cluster_http_2 + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + + + diff --git a/orion-proxy/conf/orion-runtime-lb.yaml b/orion-proxy/conf/orion-runtime-lb.yaml new file mode 100644 index 00000000..f510e42e --- /dev/null +++ b/orion-proxy/conf/orion-runtime-lb.yaml @@ -0,0 +1,291 @@ +# /proxy/ ---> (cluster1) 192.168.2.10:80 +# | +# http request ---> 192.168.1.1:8080 -| +# | +# (default) ---> (cluster2) 127.0.0.1:80 +# +# /proxy/ ---> (cluster3) 192.168.2.10:81 (h2) +# | +# http request ---> 192.168.1.1:8081 -| +# | +# / 404 +# + +logging: + log_level: "info,orion_proxy=debug,orion_xds=debug,orion_lib=debug" + log_directory: "." 
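+ # log_level above is an EnvFilter-style directive (a default level plus per-target + # overrides), matching the "env-filter" feature enabled for tracing-subscriber.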
+ log_file: "orion.log" + +runtime: + num_cpus: 1 + num_runtimes: 1 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + + +envoy_bootstrap: + static_resources: + listeners: + - name: listener_http + address: + socket_address: { address: 127.0.0.1, port_value: 8000 } + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + statPrefix: ingress_http + codecType: HTTP1 + routeConfig: + name: basic_http_route + virtual_hosts: + - name: backend + domains: ["*"] + routes: + - match: + prefix: "/rr" + route: + cluster: cluster_rr + - match: + prefix: "/wrr" + route: + cluster: cluster_wrr + - match: + prefix: "/rand" + route: + cluster: cluster_rand + - match: + prefix: "/wrand" + route: + cluster: cluster_wrand + - match: + prefix: "/least" + route: + cluster: cluster_least + - match: + prefix: "/ring" + route: + cluster: cluster_ring + # This list of hash policies is applied sequentially, combining the hashes as they are computed. + # If one policy doesn't match, it will be skipped. + # If one policy matches and is `terminal`, the iteration ends and next policies are ignored. + # If no policy matches, a random number is used as hash. + # Beware that if `hash_policy` is defined, hashes are always computed, even if no consistent load balancer is defined. + # + # The following example will be applied as follows: + # + # GET / HTTP/1.1 + # + # Hash = hash(IP) + # + # GET /?balancing_key=foo HTTP/1.1 + # + # Hash = hash(foo) + hash(IP) + # + # GET / HTTP/1.1 + # Balancing-Key: bar + # + # Hash = hash(bar) + # + # GET /?balancing_key=foo HTTP/1.1 + # Balancing-Key: bar + # + # Hash = hash(foo) + hash(bar) + hash_policy: + - query_parameter: {name: "balancing-key"} + - header: {header_name: "Balancing-Key"} + terminal: true + - connection_properties: {source_ip: true} + - match: + prefix: "" + route: + cluster: cluster_wrr + + clusters: + - name: cluster_rr + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN # should distribute traffic in order + + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + + - name: cluster_wrr + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN # should distribute traffic in order + + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + load_balancing_weight: 4 # Should receive 40% of traffic + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + load_balancing_weight: 3 # Should receive 30% of traffic + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + load_balancing_weight: 2 # Should receive 20% of traffic + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + load_balancing_weight: 1 # Should receive 10% of traffic 
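+ # Worked example: the weights above sum to 10, so over a window of ten requests + # weighted round-robin should hand 4, 3, 2 and 1 requests to ports 4001, 4002, + # 4003 and 4004 respectively (assuming every endpoint stays healthy).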
+ + - name: cluster_rand + connect_timeout: 0.25s + type: STATIC + lb_policy: RANDOM # should distribute traffic randomly + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + + - name: cluster_wrand + connect_timeout: 0.25s + type: STATIC + lb_policy: RANDOM # should distribute traffic randomly + + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + load_balancing_weight: 4 # Should receive 40% of traffic + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + load_balancing_weight: 3 # Should receive 30% of traffic + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + load_balancing_weight: 2 # Should receive 20% of traffic + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + load_balancing_weight: 1 # Should receive 10% of traffic + + - name: cluster_least + connect_timeout: 0.25s + type: STATIC + lb_policy: LEAST_REQUEST # should distribute traffic to endpoints with least requests + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + + - name: cluster_ring + connect_timeout: 0.25s + type: STATIC + lb_policy: RING_HASH # should distribute traffic consistently + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 diff --git a/orion-proxy/conf/orion-runtime-weighted-clusters.yaml b/orion-proxy/conf/orion-runtime-weighted-clusters.yaml new file mode 100644 index 00000000..0fa47a29 --- /dev/null +++ b/orion-proxy/conf/orion-runtime-weighted-clusters.yaml @@ -0,0 +1,126 @@ +# /proxy/ ---> (cluster1) 192.168.2.10:80 +# | +# http request ---> 192.168.1.1:8080 -| +# | +# (default) ---> (cluster2) 127.0.0.1:80 +# +# /proxy/ ---> (cluster3) 192.168.2.10:81 (h2) +# | +# http request ---> 192.168.1.1:8081 -| +# | +# / 404 +# + +logging: + log_level: "error,orion_proxy=debug,orion_xds=debug,orion_lib=debug" + log_directory: "." 
+ log_file: "orion.log" + +runtime: + num_cpus: 1 + num_runtimes: 1 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + + +envoy_bootstrap: + static_resources: + listeners: + - name: listener_http + address: + socket_address: { address: 127.0.0.1, port_value: 8000 } + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1 + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + routeConfig: + name: basic_http_route + virtual_hosts: + - name: backend + domains: ["*"] + routes: + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "GET" + route: + weighted_clusters: + clusters: + - name: cluster_http + weight: "1" + - name: cluster_http + weight: "2" + - name: cluster_http + weight: "3" + - match: + prefix: "/" + route: + cluster: cluster_http + - name: listener_tcp + address: + socket_address: { address: 127.0.0.1, port_value: 8001 } + filterChains: + - name: filter_chain_tcp + filter_chain_match: + destination_port: 8001 + filters: + - name: tcp_proxy_filter + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + statPrefix: ingress_tcp + cluster: cluster_tcp + + clusters: + - name: cluster_http + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4002 + + - name: cluster_tcp + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 4004 + + + diff --git a/orion-proxy/conf/orion-runtime-xds.yaml b/orion-proxy/conf/orion-runtime-xds.yaml new file mode 100644 index 00000000..550ab62f --- /dev/null +++ b/orion-proxy/conf/orion-runtime-xds.yaml @@ -0,0 +1,53 @@ +# /proxy/ ---> (cluster1) 192.168.2.10:80 +# | +# http request ---> 192.168.1.1:8080 -| +# | +# (default) ---> (cluster2) 127.0.0.1:80 +# +# /proxy/ ---> (cluster3) 192.168.2.10:81 (h2) +# | +# http request ---> 192.168.1.1:8081 -| +# | +# / 404 +# + +runtime: + num_cpus: 2 + num_runtimes: 2 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + +logging: + log_level: "debug" + # log_directory: "." 
+ # log_file: "orion.log" + +envoy_bootstrap: + dynamic_resources: + ads_config: + grpc_services: + - envoy_grpc: + cluster_name: xds_cluster + + static_resources: + clusters: + - name: xds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + # force http2 + http2_protocol_options: {} + load_assignment: + cluster_name: xds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 50051 diff --git a/orion-proxy/conf/orion-runtime.yaml b/orion-proxy/conf/orion-runtime.yaml new file mode 100644 index 00000000..09061a57 --- /dev/null +++ b/orion-proxy/conf/orion-runtime.yaml @@ -0,0 +1,125 @@ +# /proxy/ ---> (cluster1) 192.168.2.10:80 +# | +# http request ---> 192.168.1.1:8080 -| +# | +# (default) ---> (cluster2) 127.0.0.1:80 +# +# /proxy/ ---> (cluster3) 192.168.2.10:81 (h2) +# | +# http request ---> 192.168.1.1:8081 -| +# | +# / 404 +# + +runtime: + num_cpus: 2 + num_runtimes: 2 + event_interval: 31 # estimated optimal value + global_queue_interval: null # default + max_io_events_per_tick: null # default + +logging: + log_level: "debug" +# log_directory: "." +# log_file: "orion.log" + +envoy_bootstrap: + static_resources: + listeners: + - name: listener_http + address: + socket_address: { address: 0.0.0.0, port_value: 8000 } + filterChains: + - name: filter_chain_http + filter_chain_match: + destination_port: 8000 + filters: + - name: http_gateway + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + statPrefix: ingress_http + codecType: HTTP1 + httpFilters: + - name: envoy.filters.http.router + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + start_child_span: false + routeConfig: + name: basic_http_route + virtual_hosts: + - name: backend + domains: ["*"] + routes: + - match: + prefix: "/direct-response" + headers: + - name: ":method" + exactMatch: "GET" + direct_response: + status: 200 + body: + inline_string: "meow! 
🐱" + - match: + prefix: "/" + headers: + - name: ":method" + exactMatch: "GET" + route: + cluster: cluster_http + - match: + prefix: "/" + route: + cluster: cluster_http + - name: listener_tcp + address: + socket_address: { address: 0.0.0.0, port_value: 8001 } + filterChains: + - name: filter_chain_tcp + filter_chain_match: + destination_port: 8001 + filters: + - name: tcp_proxy_filter + typedConfig: + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy + statPrefix: ingress_tcp + cluster: cluster_tcp + + clusters: + - name: cluster_http + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.206.137.58 + port_value: 4001 + - endpoint: + address: + socket_address: + address: 10.206.137.58 + port_value: 4002 + + - name: cluster_tcp + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.206.137.58 + port_value: 4003 + - endpoint: + address: + socket_address: + address: 10.206.137.58 + port_value: 4004 + + + diff --git a/orion-proxy/src/core_affinity.rs b/orion-proxy/src/core_affinity.rs new file mode 100644 index 00000000..193d4051 --- /dev/null +++ b/orion-proxy/src/core_affinity.rs @@ -0,0 +1,599 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use orion_configuration::config::runtime::{Affinity, CoreId}; +use orion_lib::Result; +use std::collections::BTreeMap; +use std::collections::HashSet; + +use crate::runtime::RuntimeId; + +#[allow(unused_macros)] +#[macro_export] +macro_rules! core_ids { + ($($x:expr),*) => ( + vec![$($x),*].into_iter().map(CoreId::new).collect::>() + ); +} + +/// Return the number of the available cores to the caller thread. +#[inline] +pub fn get_avail_core_num() -> Result { + affinity::get_thread_affinity().map_err(|err| format!("get_avail_core_num: {err}").into()).map(|cpus| cpus.len()) +} + +/// Retrieve the current set of cores available to the caller thread. +#[inline] +pub fn get_core_ids() -> Result> { + affinity::get_thread_affinity() + .map_err(|err| format!("get_cores_id: {err}").into()) + .map(|cores| cores.into_iter().map(CoreId::new).collect()) +} + +/// Set the current set of cores available to the caller thread. 
+#[inline] +pub fn set_cores_for_current(cores: &[CoreId]) -> Result<()> { + affinity::set_thread_affinity(cores.iter().map(|x| **x).collect::<Vec<usize>>()) + .map_err(|err| format!("set_cores_for_current: {err}").into()) +} + +/// Returns core IDs grouped by NUMA node, with each inner vector representing the cores for a specific node +#[inline] +pub fn get_cores_ids_per_node() -> Result<Vec<Vec<CoreId>>> { + let cores = get_core_ids()?; + std::fs::read_to_string("/proc/cpuinfo").map_err(Into::into).and_then(|cpuinfo| group_by_numa(cores, &cpuinfo)) +} + +/// Groups core IDs by NUMA node based on CPU information. Returns a vector of vectors, +/// where each inner vector contains core IDs for a specific NUMA node, or an `Err` if no mapping is found. +fn group_by_numa(cores: Vec<CoreId>, cpuinfo: &str) -> Result<Vec<Vec<CoreId>>> { + let get_values = |needle: &str, haystack: &str| { + haystack + .lines() + .filter(|l| l.starts_with(needle)) + .filter_map(|s| { + let xs = s.split(':').collect::<Vec<_>>(); + if xs.len() == 2 { + xs[1].trim().parse::<usize>().ok() + } else { + None + } + }) + .collect::<Vec<_>>() + }; + + let processor_map: BTreeMap<usize, usize> = get_values("processor", cpuinfo) + .into_iter() + .zip(get_values("physical id", cpuinfo)) + .collect::<BTreeMap<_, _>>(); + + if processor_map.is_empty() { + return Err("cpuinfo: parser error".into()); + } + + let mut groups: BTreeMap<CoreId, Vec<CoreId>> = BTreeMap::new(); + for core in cores { + // CoreId derefs to the raw processor index used as the map key + if let Some(key) = processor_map.get(&*core) { + groups.entry(CoreId::new(*key)).or_default().push(core); + } else { + return Err(format!("cpuinfo: could not find mapping for core {core}").into()); + } + } + + Ok(groups.into_values().collect::<Vec<_>>()) +} + +pub trait AffinityStrategy { + fn run_strategy(&self, runtime_id: RuntimeId, cores_wanted: usize) -> Result<Vec<CoreId>>; +} + +impl AffinityStrategy for Affinity { + fn run_strategy(&self, runtime_id: RuntimeId, cores_wanted: usize) -> Result<Vec<CoreId>> { + match self { + Affinity::Auto => { + let cores_avail = get_cores_ids_per_node()?; + run_strategy(runtime_id, cores_wanted, None, cores_avail) + }, + Affinity::Nodes(cs) => { + let cores_avail = get_cores_ids_per_node()?; + run_strategy(runtime_id, cores_wanted, Some(cs.clone()), cores_avail) + }, + Affinity::Runtimes(rs) => { + let v = rs + .get(*runtime_id) + .ok_or_else(|| format!("could not find configuration for runtime {runtime_id}")) + .cloned()?; + + let v = v.into_iter().take(cores_wanted).collect::<Vec<_>>(); + if v.len() != cores_wanted { + return Err(format!( + "not enough cores for runtime {runtime_id} - wanted: {}, available: {}", + cores_wanted, + v.len() + ) + .into()); + } + + Ok(v) + }, + } + } +} + +// Applies an automatic core affinity strategy for the runtime (identified by its ID) based on the provided information, +// including the required number of cores, any specified core affinity, and the cores available to the calling thread. +fn run_strategy( + runtime_id: RuntimeId, + cores_wanted: usize, + cores_affinity: Option<Vec<Vec<CoreId>>>, + cores_avail: Vec<Vec<CoreId>>, +) -> Result<Vec<CoreId>> { + let avail_set = cores_avail.clone().into_iter().flatten().collect::<HashSet<_>>(); + + // Retrieve the affinity set or use the available cores if not specified. + // NOTE: the available cores do not account for NUMA architectures, + // as they are retrieved as a flat vector. + + let aff = cores_affinity.unwrap_or(cores_avail); + + // ensure that all the cores specified in the strategy are available for the process... 
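+ // Worked example for the round-robin selection below, assuming two NUMA node + // vectors and cores_wanted == 2: runtime 0 takes cores [0..2) of node 0, + // runtime 1 takes cores [0..2) of node 1, runtime 2 takes cores [2..4) of node 0, + // and so on (node = runtime_id % node_count, skip = (runtime_id / node_count) * cores_wanted).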
+ let aff_set = aff.clone().into_iter().flatten().collect::<HashSet<_>>(); + + if !avail_set.is_superset(&aff_set) { + return Err(format!( + "the cores {:?} are not available", + aff_set.difference(&avail_set).copied().collect::<Vec<_>>() + ) + .into()); + } + + // Perform a round-robin selection among the NUMA vectors, selecting the node and + // then the chunk of cores to use for binding. + + let node = runtime_id + .0 + .checked_rem(aff.len()) + .and_then(|idx| aff.get(idx)) + .ok_or_else(|| "unexpected affinity vector length".to_owned())?; + + let cores = node + .iter() + .skip( + runtime_id.0.checked_div(aff.len()).ok_or_else(|| "unexpected affinity vector length".to_owned())? + * cores_wanted, + ) + .take(cores_wanted) + .copied() + .collect::<Vec<_>>(); + + if cores.len() == cores_wanted { + Ok(cores) + } else { + Err(format!("not enough cores for runtime {runtime_id} - wanted: {}, available: {}", cores_wanted, cores.len()) + .into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_group_by_numa_no_cpuinfo() { + let cores = core_ids![0, 1, 2, 3]; + assert!(group_by_numa(cores, "").is_err()); + } + + #[test] + fn test_group_by_numa_bad_cpuinfo() { + let cores = core_ids![0, 1, 2, 3]; + assert!(group_by_numa(cores, "deadbeef").is_err()); + } + + #[test] + fn test_group_by_numa_single() { + let cores = core_ids![0, 1, 2, 3]; + let cpuinfo = r###"processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd 
sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management:"###; + assert_eq!(group_by_numa(cores, cpuinfo).unwrap(), vec![core_ids![0, 1, 2, 3]]); + } + + #[test] + fn test_group_by_numa_err() { + let cores = core_ids![4, 5, 6, 7]; + let cpuinfo = r###"processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes 
+fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 
bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management:"###; + assert!(group_by_numa(cores, cpuinfo).is_err()); + } + + #[test] + fn test_group_by_numa_dual_nodes() { + let cores = core_ids![0, 1, 2, 3]; + let cpuinfo = r###"processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 1 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch 
invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 140 +model name : 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz +stepping : 1 +microcode : 0xffffffff +cpu MHz : 2803.213 +cache size : 12288 KB +physical id : 1 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 21 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid fsrm avx512_vp2intersect md_clear flush_l1d arch_capabilities +bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs retbleed eibrs_pbrsb gds +bogomips : 5606.42 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management:"###; + assert_eq!(group_by_numa(cores, cpuinfo).unwrap(), vec![core_ids![0, 2], core_ids![1, 3]]); + } + + #[test] + fn affinity_strategy_unspecified() { + let avail = vec![core_ids![0, 1, 2, 3, 4, 5, 6, 7, 8]]; + let cores_affinity = None; + assert_eq!(run_strategy(RuntimeId(0), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![0]); + 
assert_eq!(run_strategy(RuntimeId(1), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![1]); + assert_eq!(run_strategy(RuntimeId(0), 2, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![0, 1]); + assert_eq!(run_strategy(RuntimeId(1), 2, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![2, 3]); + assert_eq!( + run_strategy(RuntimeId(0), 4, cores_affinity.clone(), avail.clone()).unwrap(), + core_ids![0, 1, 2, 3] + ); + } + + #[test] + fn affinity_strategy_single() { + let avail = vec![core_ids![0, 1, 2, 3, 4, 5, 6, 7, 8]]; + let cores_affinity = Some(vec![core_ids![0, 1, 2, 3]]); + assert_eq!(run_strategy(RuntimeId(0), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![0]); + assert_eq!(run_strategy(RuntimeId(1), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![1]); + assert_eq!(run_strategy(RuntimeId(0), 2, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![0, 1]); + assert_eq!(run_strategy(RuntimeId(1), 2, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![2, 3]); + assert_eq!( + run_strategy(RuntimeId(0), 4, cores_affinity.clone(), avail.clone()).unwrap(), + core_ids![0, 1, 2, 3] + ); + assert!(run_strategy(RuntimeId(1), 4, cores_affinity.clone(), avail.clone()).is_err()); + } + + #[test] + fn affinity_strategy_numa() { + let avail = vec![core_ids![0, 1, 2, 3, 4, 5, 6, 7, 8]]; + let cores_affinity = Some(vec![core_ids![0, 1, 2], core_ids![3, 4, 5], core_ids![6, 7, 8]]); + + assert_eq!(run_strategy(RuntimeId(0), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![0]); + assert_eq!(run_strategy(RuntimeId(1), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![3]); + assert_eq!(run_strategy(RuntimeId(2), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![6]); + assert_eq!(run_strategy(RuntimeId(3), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![1]); + assert_eq!(run_strategy(RuntimeId(4), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![4]); + assert_eq!(run_strategy(RuntimeId(5), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![7]); + assert_eq!(run_strategy(RuntimeId(6), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![2]); + assert_eq!(run_strategy(RuntimeId(7), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![5]); + assert_eq!(run_strategy(RuntimeId(8), 1, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![8]); + assert!(run_strategy(RuntimeId(9), 1, cores_affinity.clone(), avail.clone()).is_err()); + + assert_eq!(run_strategy(RuntimeId(0), 3, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![0, 1, 2]); + assert_eq!(run_strategy(RuntimeId(1), 3, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![3, 4, 5]); + assert_eq!(run_strategy(RuntimeId(2), 3, cores_affinity.clone(), avail.clone()).unwrap(), core_ids![6, 7, 8]); + assert!(run_strategy(RuntimeId(3), 3, cores_affinity.clone(), avail.clone()).is_err()); + } +} diff --git a/orion-proxy/src/lib.rs b/orion-proxy/src/lib.rs new file mode 100644 index 00000000..06b0587a --- /dev/null +++ b/orion-proxy/src/lib.rs @@ -0,0 +1,88 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use orion_configuration::{ + config::{Config, Log as LogConf}, + options::Options, +}; +use orion_lib::{Result, RUNTIME_CONFIG}; +use tracing_appender::non_blocking::WorkerGuard; +use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Registry}; + +#[macro_use] +mod core_affinity; +mod proxy; +mod runtime; +mod xds_configurator; + +pub fn run() -> Result<()> { + let options = Options::parse_options(); + let Config { runtime, logging, bootstrap } = Config::new(&options)?; + RUNTIME_CONFIG.set(runtime).map_err(|_| "runtime config was somehow set before we had a chance to set it")?; + let _guard = init_tracing(logging); + #[cfg(target_os = "linux")] + if !(caps::has_cap(None, caps::CapSet::Permitted, caps::Capability::CAP_NET_RAW)?) { + tracing::warn!("CAP_NET_RAW is NOT available, SO_BINDTODEVICE will not work"); + } + proxy::run_proxy(bootstrap) +} + +#[cfg(feature = "console")] +use console_subscriber; + +#[cfg(feature = "console")] +fn init_tracing(_conf: LogConf) -> WorkerGuard { + let (_non_blocking, guard) = tracing_appender::non_blocking(std::io::stdout()); + console_subscriber::init(); + guard +} + +#[cfg(not(feature = "console"))] +fn init_tracing(log_conf: LogConf) -> WorkerGuard { + let env_filter = EnvFilter::try_from_default_env().ok().or(log_conf.log_level).unwrap_or_else(|| { + EnvFilter::builder() + .with_default_directive(tracing_subscriber::filter::LevelFilter::ERROR.into()) + .parse_lossy("") + }); + + match log_conf.log_file.as_ref() { + None => { + let out = std::io::stdout(); + let is_terminal = std::io::IsTerminal::is_terminal(&out); + let (non_blocking, guard) = tracing_appender::non_blocking(out); + let mut std_layer = fmt::layer().with_writer(non_blocking).with_thread_names(true); + + if !is_terminal { + std_layer = std_layer.with_ansi(false); + } + + Registry::default().with(env_filter).with(std_layer).init(); + guard + }, + Some(filename) => { + let file_appender = + tracing_appender::rolling::hourly(log_conf.log_directory.as_ref().unwrap_or(&".".into()), filename); + let (non_blocking, guard) = tracing_appender::non_blocking(file_appender); + let file_layer = fmt::layer().with_ansi(false).with_writer(non_blocking).with_thread_names(true); + Registry::default().with(env_filter).with(file_layer).init(); + guard + }, + } +} diff --git a/orion-proxy/src/main.rs b/orion-proxy/src/main.rs new file mode 100644 index 00000000..d827f535 --- /dev/null +++ b/orion-proxy/src/main.rs @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +#[cfg(all(feature = "jemalloc", feature = "dhat-heap"))] +compile_error!("feature \"jemalloc\" and feature \"dhat-heap\" cannot be enabled at the same time"); + +#[cfg(all(feature = "jemalloc", not(target_env = "msvc")))] +use tikv_jemallocator::Jemalloc; + +#[cfg(all(feature = "jemalloc", not(target_env = "msvc")))] +#[global_allocator] +static GLOBAL: Jemalloc = Jemalloc; + +#[cfg(feature = "dhat-heap")] +#[global_allocator] +static ALLOC: dhat::Alloc = dhat::Alloc; +fn main() -> orion_error::Result<()> { + #[cfg(feature = "dhat-heap")] + let _profiler = dhat::Profiler::new_heap(); + orion_proxy::run() +} diff --git a/orion-proxy/src/proxy.rs b/orion-proxy/src/proxy.rs new file mode 100644 index 00000000..45e4c882 --- /dev/null +++ b/orion-proxy/src/proxy.rs @@ -0,0 +1,207 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+//
+
+use crate::{
+    core_affinity,
+    runtime::{self, RuntimeId},
+    xds_configurator::XdsConfigurationHandler,
+};
+use futures::future::join_all;
+use orion_configuration::config::{bootstrap::Node, Bootstrap};
+use orion_error::ResultExtension;
+use orion_lib::{
+    get_listeners_and_clusters, new_configuration_channel, runtime_config, ConfigurationReceivers,
+    ConfigurationSenders, ListenerConfigurationChange, ListenerFactory, PartialClusterType, Result, SecretManager,
+};
+use std::{
+    sync::atomic::AtomicUsize,
+    thread::{self, JoinHandle},
+};
+use tokio::{runtime::Builder, sync::mpsc::Sender};
+use tracing::{debug, error, info, warn};
+
+pub fn run_proxy(bootstrap: Bootstrap) -> Result<()> {
+    debug!("Starting on thread {:?}", std::thread::current().name());
+    launch_runtimes(bootstrap).context("failed to launch runtimes")
+}
+
+/// Divides `num_cpus` evenly across `num_runtimes`, e.g. 8 CPUs and 2 runtimes
+/// yield 4 worker threads per runtime.
+fn calculate_threads_per_runtime(num_cpus: usize, num_runtimes: usize) -> Result<usize> {
+    let avail_cpus = core_affinity::get_avail_core_num()?;
+    if num_cpus > avail_cpus {
+        return Err(
+            format!("The number of CPUs ({num_cpus}) exceeds those available for this process ({avail_cpus})").into()
+        );
+    }
+
+    let threads = num_cpus / num_runtimes;
+    if threads == 0 {
+        return Err(
+            format!("The number of runtimes exceeds the number of CPUs ({num_cpus} < {num_runtimes})").into()
+        );
+    }
+
+    if num_cpus % num_runtimes != 0 {
+        return Err(format!(
+            "The number of CPUs ({num_cpus}) is not a multiple of the number of runtimes ({num_runtimes})",
+        )
+        .into());
+    }
+
+    Ok(threads)
+}
+
+fn launch_runtimes(bootstrap: Bootstrap) -> Result<()> {
+    let config = runtime_config();
+    let num_runtimes = config.num_runtimes();
+    let num_cpus = config.num_cpus();
+    info!("Launching with {} CPUs, {} runtimes", num_cpus, num_runtimes);
+
+    let handles = {
+        let num_threads_per_runtime = calculate_threads_per_runtime(num_cpus, num_runtimes)
+            .context("failed to calculate number of threads to use per runtime")?;
+        info!("using {} runtimes with {num_threads_per_runtime} threads each", config.num_runtimes());
+
+        (0..num_runtimes)
+            .map(|id| spawn_runtime_from_thread(num_threads_per_runtime, RuntimeId(id)))
+            .collect::<Result<Vec<_>>>()?
+    };
+
+    let (handles, configuration_senders): (Vec<_>, Vec<_>) = handles.into_iter().unzip();
+
+    // The xDS runtime always runs - this is necessary for initialization even if we do not
+    // use dynamic updates from remote xDS servers. The decision on whether dynamic updates
+    // are used is based on:
+    // - The bootstrap loader from orion-data-plane-api gets the list of cluster names used
+    //   in dynamic_resources/ads_config (for grpc_services)
+    // - resolve ads clusters into endpoints, to be used as the xDS address
+    // TODO: the xDS client could receive updates for endpoints too, i.e. dynamic clusters. We
+    // should replace this with passing a configuration receiver. For now, endpoints come
+    // from static clusters.
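+    //
+    // Illustrative sketch (hypothetical bootstrap fragment, not shipped with this
+    // repo): given
+    //
+    //   dynamic_resources:
+    //     ads_config:
+    //       grpc_services:
+    //         - envoy_grpc: { cluster_name: xds_cluster }
+    //   static_resources:
+    //     clusters:
+    //       - name: xds_cluster   # endpoints resolved statically, e.g. 127.0.0.1:18000
+    //
+    // get_ads_configs() below would yield ["xds_cluster"], and the xDS client dials
+    // the endpoint taken from that static cluster.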
+
+    let ads_cluster_names: Vec<String> = bootstrap.get_ads_configs().iter().map(ToString::to_string).collect();
+    let node = bootstrap.node.clone().unwrap_or_else(|| Node { id: "".into() });
+
+    let (secret_manager, listeners, clusters) =
+        get_listeners_and_clusters(bootstrap).context("Failed to get listeners and clusters")?;
+
+    if listeners.is_empty() && ads_cluster_names.is_empty() {
+        return Err("No listeners and no ads clusters configured".into());
+    }
+
+    let _guard = match xds_loop(node, configuration_senders, secret_manager, listeners, clusters, ads_cluster_names) {
+        Ok(g) => {
+            debug!("xDS loop finished");
+            g
+        },
+        Err(err) => {
+            error!("xDS loop exited with error: {err}");
+            return Err(err);
+        },
+    };
+
+    for handle in handles {
+        if let Err(err) = handle.join() {
+            warn!("Closing handler with error {err:?}");
+        }
+    }
+    Ok(())
+}
+
+type RuntimeHandle = (JoinHandle<Result<()>>, ConfigurationSenders);
+
+fn spawn_runtime_from_thread(num_threads: usize, runtime_id: RuntimeId) -> Result<RuntimeHandle> {
+    let (configuration_senders, configuration_receivers) = new_configuration_channel(100);
+
+    let handle: JoinHandle<Result<()>> =
+        thread::Builder::new().name(format!("proxy_{runtime_id}")).spawn(move || {
+            let rt = runtime::build_tokio_runtime(num_threads, runtime_id);
+            rt.block_on(async {
+                tokio::select! {
+                    _ = start_proxy(configuration_receivers) => {
+                        info!("Proxy Runtime terminated!");
+                        Ok(())
+                    }
+                    _ = tokio::signal::ctrl_c() => {
+                        info!("CTRL+C caught (proxy runtime)!");
+                        Ok(())
+                    }
+                }
+            })
+        })?;
+    Ok((handle, configuration_senders))
+}
+
+// TODO: rework this; building a dedicated single-threaded runtime for xDS is a stopgap.
+fn xds_loop(
+    node: Node,
+    configuration_senders: Vec<ConfigurationSenders>,
+    secret_manager: SecretManager,
+    listeners: Vec<ListenerFactory>,
+    clusters: Vec<PartialClusterType>,
+    ads_cluster_names: Vec<String>,
+) -> Result<XdsConfigurationHandler> {
+    let mut builder = Builder::new_multi_thread();
+    let runtime = builder
+        .enable_all()
+        .worker_threads(1)
+        .max_blocking_threads(1)
+        .thread_name_fn(|| {
+            static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
+            let id = ATOMIC_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
+            format!("xdstask_{id}")
+        })
+        .build()
+        .expect("failed to build basic runtime");
+    runtime.block_on(async move {
+        let secret_manager =
+            configure_initial_resources(secret_manager, listeners, configuration_senders.clone()).await?;
+        let xds_runtime = XdsConfigurationHandler::new(secret_manager, configuration_senders);
+
+        xds_runtime.run(node, clusters, ads_cluster_names).await
+    })
+}
+
+async fn configure_initial_resources(
+    secret_manager: SecretManager,
+    listeners: Vec<ListenerFactory>,
+    configuration_senders: Vec<ConfigurationSenders>,
+) -> Result<SecretManager> {
+    let listeners_tx: Vec<_> = configuration_senders
+        .into_iter()
+        .map(|ConfigurationSenders { listener_configuration_sender, route_configuration_sender: _ }| {
+            listener_configuration_sender
+        })
+        .collect();
+
+    for listener in listeners {
+        let _ = join_all(listeners_tx.iter().map(|listener_tx: &Sender<ListenerConfigurationChange>| {
+            listener_tx.send(ListenerConfigurationChange::Added(listener.clone()))
+        }))
+        .await;
+    }
+
+    Ok(secret_manager)
+}
+
+async fn start_proxy(configuration_receivers: ConfigurationReceivers) -> Result<()> {
+    let mut set = orion_lib::start_ng_on_joinset(configuration_receivers)?;
+    while set.join_next().await.is_some() {}
+    Ok(())
+}
diff --git a/orion-proxy/src/runtime.rs b/orion-proxy/src/runtime.rs
new file mode 100644
index 00000000..0e78b8d6
--- /dev/null
+++ b/orion-proxy/src/runtime.rs
@@ -0,0 +1,103 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing
Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use crate::core_affinity::{self, AffinityStrategy}; +use orion_lib::runtime_config; +use std::{fmt::Display, ops::Deref}; +use tokio::runtime::{Builder, Runtime}; +use tracing::{info, warn}; + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct RuntimeId(pub usize); + +impl Display for RuntimeId { + #[inline] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Deref for RuntimeId { + type Target = usize; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +pub fn build_tokio_runtime(num_threads: usize, runtime_id: RuntimeId) -> Runtime { + let thread_name = "proxytask"; + let config = runtime_config(); + if num_threads == 1 { + if let Some(affinity) = &config.affinity_strategy { + match affinity.run_strategy(runtime_id, num_threads) { + Ok(aff) => { + if let Err(err) = core_affinity::set_cores_for_current(&aff) { + warn!("{thread_name}: Couldn't pin thread to core {aff:?}: {err}"); + } else { + info!("{thread_name}: ST-runtime[{runtime_id}] pinned to core {aff:?}"); + } + }, + Err(e) => { + warn!("{thread_name}: Strategy: {e}"); + }, + } + } + + let mut builder = Builder::new_current_thread(); + builder.enable_all(); + + config.global_queue_interval.map(|val| builder.global_queue_interval(val.into())); + config.event_interval.map(|val| builder.event_interval(val)); + config.max_io_events_per_tick.map(|val| builder.max_io_events_per_tick(val.into())); + #[allow(clippy::expect_used)] + builder.thread_name(format!("{thread_name}{}", runtime_id.0)).build().expect("failed to build basic runtime") + } else { + if let Some(affinity) = &config.affinity_strategy { + match affinity.run_strategy(runtime_id, num_threads) { + Ok(aff) => { + if let Err(err) = core_affinity::set_cores_for_current(&aff) { + warn!("{thread_name}: Couldn't pin thread to core {aff:?}: {err}"); + } else { + info!("{thread_name} MT-{num_threads}-runtime[{runtime_id}] pinned to cores {aff:?}"); + } + }, + Err(e) => { + warn!("{thread_name}: Strategy: {e}"); + }, + } + } + + let mut builder = Builder::new_multi_thread(); + builder.enable_all().worker_threads(num_threads).max_blocking_threads(num_threads); + + config.global_queue_interval.map(|val| builder.global_queue_interval(val.into())); + config.event_interval.map(|val| builder.event_interval(val)); + config.max_io_events_per_tick.map(|val| builder.max_io_events_per_tick(val.into())); + + let name = thread_name.to_owned(); + #[allow(clippy::expect_used)] + builder + .thread_name_fn(move || format!("{name}{}", runtime_id.0)) + .build() + .expect("failed to build threaded runtime") + } +} diff --git a/orion-proxy/src/xds_configurator.rs b/orion-proxy/src/xds_configurator.rs new file mode 100644 index 00000000..dc4d9e52 --- /dev/null +++ b/orion-proxy/src/xds_configurator.rs @@ -0,0 +1,309 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// 
SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +use abort_on_drop::ChildTask; +use futures::future::join_all; +use orion_configuration::config::{bootstrap::Node, cluster::ClusterSpecifier}; +use orion_lib::{ + ConfigurationSenders, ConversionContext, EndpointHealthUpdate, HealthCheckManager, ListenerConfigurationChange, + ListenerFactory, PartialClusterLoadAssignment, PartialClusterType, Result, RouteConfigurationChange, SecretManager, +}; +use orion_xds::{ + start_aggregate_client_no_retry_loop, + xds::{ + bindings::AggregatedDiscoveryType, + client::XdsUpdateEvent, + client::{DeltaClientBackgroundWorker, DeltaDiscoveryClient, DeltaDiscoverySubscriptionManager}, + model::{RejectedConfig, TypeUrl, XdsResourcePayload, XdsResourceUpdate}, + }, +}; +use std::time::Duration; +use tokio::{ + select, + sync::mpsc::{self, Receiver, Sender}, +}; +use tracing::{debug, info, warn}; + +const RETRY_INTERVAL: Duration = Duration::from_secs(10); + +pub struct XdsConfigurationHandler { + secret_manager: SecretManager, + health_manager: HealthCheckManager, + listeners_senders: Vec>, + route_senders: Vec>, + health_updates_receiver: Receiver, +} + +impl XdsConfigurationHandler { + pub fn new(secret_manager: SecretManager, configuration_senders: Vec) -> Self { + let mut listeners_senders = Vec::with_capacity(configuration_senders.len()); + let mut route_senders = Vec::with_capacity(configuration_senders.len()); + for ConfigurationSenders { listener_configuration_sender, route_configuration_sender } in configuration_senders + { + listeners_senders.push(listener_configuration_sender); + route_senders.push(route_configuration_sender); + } + let (health_updates_sender, health_updates_receiver) = mpsc::channel(1000); + let health_manager = HealthCheckManager::new(health_updates_sender); + Self { secret_manager, health_manager, listeners_senders, route_senders, health_updates_receiver } + } + + // Resolve cluster name into working endpoint, return working client + fn resolve_endpoint( + cluster_name: &str, + node: &Node, + ) -> Result<( + DeltaClientBackgroundWorker>, + DeltaDiscoveryClient, + DeltaDiscoverySubscriptionManager, + )> { + let selector = ClusterSpecifier::Cluster(cluster_name.into()); + + let grpc_service = match orion_lib::clusters::get_grpc_connection(&selector) { + Ok(grpc_service) => grpc_service, + Err(err) => { + let msg = format!("Failed to get gRPC channel from cluster ({cluster_name}): {err}"); + warn!(msg); + return Err(msg.into()); + }, + }; + + start_aggregate_client_no_retry_loop(node.clone(), grpc_service) + .inspect_err(|e| warn!("Failed to connect to xDS server ({cluster_name}): {e}")) + .map_err(Into::into) + } + + pub async fn run( + mut self, + node: Node, + initial_clusters: Vec, + ads_cluster_names: Vec, + ) -> Result { + select! 
{ + _ = tokio::signal::ctrl_c() => info!("CTRL+C catch (XDS runtime)!"), + result = self.run_loop(node, initial_clusters, ads_cluster_names) => result?, + } + Ok(self) + } + + async fn run_loop( + &mut self, + node: Node, + initial_clusters: Vec, + ads_cluster_names: Vec, + ) -> Result<()> { + for partial_cluster in initial_clusters { + if let Err(err) = self.add_cluster(partial_cluster).await { + tracing::error!("Could not add cluster: {}", err); + } + } + + let mut cluster_names = ads_cluster_names.into_iter().cycle(); + + let (mut worker, mut client, _subscription_manager) = loop { + let Some(cluster_name) = cluster_names.next() else { + info!("No xDS clusters configured"); + return Ok(()); + }; + + if let Ok(val) = Self::resolve_endpoint(&cluster_name, &node) { + break val; + } + + info!("Retrying XDS connection in {} seconds", RETRY_INTERVAL.as_secs()); + tokio::time::sleep(RETRY_INTERVAL).await; + }; + + let _xds_worker: ChildTask<_> = tokio::spawn(async move { + let subscribe = worker.run().await; + info!("Worker exited {subscribe:?}"); + }) + .into(); + + loop { + select! { + Some(xds_update) = client.recv() => { + info!("Got notification {xds_update:?}"); + let XdsUpdateEvent { ack_channel, updates } = xds_update; + // Box::pin because the future from self.process_updates() is very large + let rejected_updates = Box::pin(self.process_updates(updates)).await; + let _ = ack_channel.send(rejected_updates); + }, + Some(health_update) = self.health_updates_receiver.recv() => Self::process_health_event(&health_update), + else => break, + } + } + + self.health_manager.stop_all().await; + + Ok(()) + } + + async fn process_updates(&mut self, updates: Vec) -> Vec { + let mut rejected_updates = Vec::new(); + for update in updates { + match update { + XdsResourceUpdate::Update(id, resource) => { + if let Err(e) = self.process_update_event(&id, resource).await { + rejected_updates.push(RejectedConfig::from((id, e))); + } + }, + XdsResourceUpdate::Remove(id, resource) => { + if let Err(e) = self.process_remove_events(&id, resource).await { + rejected_updates.push(RejectedConfig::from((id, e))); + } + }, + } + } + rejected_updates + } + + async fn process_remove_events(&mut self, id: &str, resource: TypeUrl) -> Result<()> { + match resource { + orion_xds::xds::model::TypeUrl::Cluster => { + orion_lib::clusters::remove_cluster(id)?; + self.health_manager.stop_cluster(id).await; + Ok(()) + }, + orion_xds::xds::model::TypeUrl::Listener => { + let change = ListenerConfigurationChange::Removed(id.to_owned()); + let _ = send_change_to_runtimes(&self.listeners_senders, change).await; + Ok(()) + }, + orion_xds::xds::model::TypeUrl::ClusterLoadAssignment => { + orion_lib::clusters::remove_cluster_load_assignment(id)?; + self.health_manager.stop_cluster(id).await; + Ok(()) + }, + orion_xds::xds::model::TypeUrl::RouteConfiguration => { + let change = RouteConfigurationChange::Removed(id.to_owned()); + let _ = send_change_to_runtimes(&self.route_senders, change).await; + Ok(()) + }, + orion_xds::xds::model::TypeUrl::Secret => { + let msg = "Secret removal is not supported"; + warn!("{msg}"); + Err(msg.into()) + }, + } + } + + #[allow(clippy::too_many_lines)] + async fn process_update_event(&mut self, _: &str, resource: XdsResourcePayload) -> Result<()> { + match resource { + XdsResourcePayload::Listener(id, listener) => { + debug!("Got update for listener {id} {:?}", listener); + let factory = ListenerFactory::try_from(ConversionContext::new((listener, &self.secret_manager))); + + match factory { + 
Ok(factory) => {
+                        let change = ListenerConfigurationChange::Added(factory);
+                        let _ = send_change_to_runtimes(&self.listeners_senders, change).await;
+                        Ok(())
+                    },
+                    Err(err) => {
+                        warn!("Got invalid update for listener {id}");
+                        Err(err)
+                    },
+                }
+            },
+            XdsResourcePayload::Cluster(id, cluster) => {
+                debug!("Got update for cluster: {id}: {:#?}", cluster);
+                let cluster_builder = PartialClusterType::try_from((cluster, &self.secret_manager));
+                match cluster_builder {
+                    Ok(cluster) => self.add_cluster(cluster).await,
+                    Err(err) => {
+                        warn!("Got invalid update for cluster {id}");
+                        Err(err)
+                    },
+                }
+            },
+            XdsResourcePayload::RouteConfiguration(id, route) => {
+                debug!("Got update for route configuration {id}: {:#?}", route);
+                let change = RouteConfigurationChange::Added((id.clone(), route));
+                let _ = send_change_to_runtimes(&self.route_senders, change).await;
+                Ok(())
+            },
+            XdsResourcePayload::Endpoints(id, cla) => {
+                debug!("Got update for cluster load assignment {id}: {:#?}", cla);
+                let cla = PartialClusterLoadAssignment::try_from(cla);
+
+                match cla {
+                    Ok(cla) => {
+                        let cluster_name = id.clone();
+                        let cluster_config = orion_lib::clusters::change_cluster_load_assignment(&cluster_name, &cla)?;
+                        self.health_manager.restart_cluster(cluster_config).await;
+                        Ok(())
+                    },
+                    Err(err) => {
+                        warn!("Got invalid update for cluster load assignment {id}");
+                        Err(err)
+                    },
+                }
+            },
+            XdsResourcePayload::Secret(id, secret) => {
+                debug!("Got update for secret {id}: {:#?}", secret);
+                let res = self.secret_manager.add(secret);
+
+                match res {
+                    Ok(secret) => {
+                        let cluster_configs = orion_lib::clusters::update_tls_context(&id, &secret)?;
+                        for cluster_config in cluster_configs {
+                            self.health_manager.restart_cluster(cluster_config).await;
+                        }
+                        let change = ListenerConfigurationChange::TlsContextChanged((id.clone(), secret));
+                        let _ = send_change_to_runtimes(&self.listeners_senders, change).await;
+                        Ok(())
+                    },
+                    Err(err) => {
+                        warn!("Got invalid update for secret {id}");
+                        Err(err)
+                    },
+                }
+            },
+        }
+    }
+
+    async fn add_cluster(&mut self, cluster: PartialClusterType) -> Result<()> {
+        let cluster_config = orion_lib::clusters::add_cluster(cluster)?;
+        self.health_manager.restart_cluster(cluster_config).await;
+        Ok(())
+    }
+
+    fn process_health_event(health_update: &EndpointHealthUpdate) {
+        orion_lib::clusters::update_endpoint_health(
+            &health_update.endpoint.cluster,
+            &health_update.endpoint.endpoint,
+            health_update.health,
+        );
+    }
+}
+
+async fn send_change_to_runtimes<Change: Clone>(channels: &[Sender<Change>], change: Change) -> Result<()> {
+    let futures: Vec<_> = channels
+        .iter()
+        .map(|f| {
+            let change = change.clone();
+            f.send(change)
+        })
+        .collect();
+    let _ = join_all(futures).await;
+    Ok(())
+}
diff --git a/orion-proxy/tests/configs.rs b/orion-proxy/tests/configs.rs
new file mode 100644
index 00000000..57600996
--- /dev/null
+++ b/orion-proxy/tests/configs.rs
@@ -0,0 +1,46 @@
+use orion_configuration::config::{Config, Runtime};
+use orion_configuration::options::Options;
+use orion_lib::configuration::get_listeners_and_clusters;
+use orion_lib::RUNTIME_CONFIG;
+use std::path::{Path, PathBuf};
+use std::sync::Mutex;
+use tracing_test::traced_test;
+
+/// We cannot run the tests concurrently because some of them rely on
+/// the current working directory ($PWD). This function is just a wrapper with
+/// a lock.
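+///
+/// Illustrative usage sketch (hypothetical path and file, not an actual test):
+/// ```ignore
+/// let yaml = with_current_dir(Path::new("conf/demo"), || std::fs::read_to_string("demo-static.yaml"));
+/// ```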
+fn with_current_dir<F, T>(p: &Path, f: F) -> T
+where
+    F: FnOnce() -> T,
+{
+    static TEST_CURRENT_DIR_MUTEX: Mutex<()> = Mutex::new(());
+    let _guard = TEST_CURRENT_DIR_MUTEX.lock().expect("Failed to lock test mutex");
+    let _ = RUNTIME_CONFIG.set(Runtime::default());
+    let save = std::env::current_dir().expect("Failed to get current dir");
+    std::env::set_current_dir(p).expect("Failed to set current dir");
+    let r = f();
+    std::env::set_current_dir(save).expect("Failed to restore current dir");
+    r
+}
+
+fn check_config_file(file_path: &str) -> Result<(), orion_error::Error> {
+    // file_path is relative to the crate root
+    let bootstrap = Config::new(&Options::from_path_to_envoy(file_path))?.bootstrap;
+    // but ancillary files are stored in the workspace root - adjust $PWD
+    let d =
+        PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("..").canonicalize().expect("Failed to get cargo crate root");
+    with_current_dir(&d, || get_listeners_and_clusters(bootstrap).map(|_| ()))
+}
+
+#[traced_test]
+#[test]
+fn bootstrap_demo_static() -> Result<(), orion_error::Error> {
+    check_config_file("conf/demo/demo-static.yaml")
+}
+
+#[traced_test]
+#[test]
+fn bootstrap_demo_dynamic() -> Result<(), orion_error::Error> {
+    check_config_file("conf/demo/demo-dynamic.yaml")
+}
diff --git a/orion-xds/Cargo.toml b/orion-xds/Cargo.toml
new file mode 100644
index 00000000..46f8fa2f
--- /dev/null
+++ b/orion-xds/Cargo.toml
@@ -0,0 +1,47 @@
+[package]
+edition = "2021"
+name = "orion-xds"
+publish = ["rust-inhuawei-com"]
+version = "0.1.0"
+
+[dependencies]
+bytes.workspace = true
+futures.workspace = true
+http.workspace = true
+orion-configuration.workspace = true
+orion-data-plane-api.workspace = true
+orion-error.workspace = true
+serde.workspace = true
+tokio.workspace = true
+tower.workspace = true
+tracing.workspace = true
+
+async-stream = "0.3"
+atomic-take = "1.1.0"
+
+thiserror = "2.0.11"
+tokio-stream.workspace = true
+uuid = { version = "1.7.0", features = ["v4"] }
+
+[dev-dependencies]
+serde_yaml.workspace = true
+tokio.workspace = true
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+[lints.clippy]
+clone_on_ref_ptr = "warn"
+doc_markdown = "allow"
+get_unwrap = "allow"
+if_then_some_else_none = "warn"
+missing_docs_in_private_items = "allow"
+missing_errors_doc = "allow"
+missing_panics_doc = "allow"
+module_name_repetitions = "allow"
+must_use_candidate = "allow"
+pedantic = "warn"
+print_stderr = "warn"
+print_stdout = "warn"
+str_to_string = "warn"
+string_to_string = "warn"
+todo = "deny"
+unwrap_used = "deny"
diff --git a/orion-xds/examples/client.rs b/orion-xds/examples/client.rs
new file mode 100644
index 00000000..9fc0f725
--- /dev/null
+++ b/orion-xds/examples/client.rs
@@ -0,0 +1,49 @@
+use futures::future::select;
+use orion_configuration::config::bootstrap::Node;
+use orion_xds::{
+    start_aggregate_client,
+    xds::model::{XdsResourcePayload, XdsResourceUpdate},
+};
+use std::future::IntoFuture;
+use tracing::{debug, info};
+use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    tracing_subscriber::registry()
+        .with(tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info, orion_xds=debug".into()))
+        .with(tracing_subscriber::fmt::layer())
+        .init();
+
+    let (mut worker, mut client, _subscription_manager) =
+        start_aggregate_client(Node { id: "node1".into() }, "http://127.0.0.1:50051".parse()?).await?;
+    let xds_worker = tokio::spawn(async move {
+        let subscribe =
worker.run().await; + info!("Worker exited {subscribe:?}"); + }); + + let xds_client = tokio::spawn(async move { + while let Some(notification) = client.recv().await { + debug!("Got notification {notification:?}"); + let _ = notification.ack_channel.send(vec![]); + + for update in notification.updates { + match update { + XdsResourceUpdate::Update(_id, resource) => match resource { + XdsResourcePayload::Listener(_id, resource) => { + info!("Got update for listener {resource:#?}"); + }, + XdsResourcePayload::Cluster(_id, resource) => { + info!("Got update for cluster {resource:#?}"); + }, + _ => {}, + }, + XdsResourceUpdate::Remove(_id, _resource) => {}, + } + } + } + }); + + let _ = select(xds_client.into_future(), xds_worker.into_future()).await; + Ok(()) +} diff --git a/orion-xds/examples/server.rs b/orion-xds/examples/server.rs new file mode 100644 index 00000000..a898a3a4 --- /dev/null +++ b/orion-xds/examples/server.rs @@ -0,0 +1,80 @@ +use orion_data_plane_api::envoy_data_plane_api::envoy::extensions::filters::network::http_connection_manager::v3::http_connection_manager::CodecType; +use orion_xds::xds::{resources, server::{start_aggregate_server, ServerAction}}; +use tracing::info; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; +use std::{future::IntoFuture, time::Duration}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::registry() + .with(tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info, orion_xds=debug".into())) + .with(tracing_subscriber::fmt::layer()) + .init(); + + let (delta_resource_tx, delta_resources_rx) = tokio::sync::mpsc::channel(100); + let (_stream_resource_tx, stream_resources_rx) = tokio::sync::mpsc::channel(100); + let addr = "127.0.0.1:50051".parse()?; + + let grpc_server = tokio::spawn(async move { + info!("Server started"); + let res = start_aggregate_server(addr, delta_resources_rx, stream_resources_rx).await; + info!("Server stopped {res:?}"); + }); + tokio::time::sleep(std::time::Duration::from_secs(10)).await; + + let _xds_resource_producer = tokio::spawn(async move { + loop { + let id = uuid::Uuid::new_v4().to_string(); + let listener_id = format!("Listener-{id}"); + let cluster_id = format!("Cluster-{id}"); + + let cluster = resources::create_cluster_with_endpoints( + &cluster_id, + "192.168.1.10:4000".parse().expect("we really should panic here if this is wrong"), + 2, + true, + ); + info!("Adding cluster {cluster_id}"); + let cluster_resource = resources::create_cluster_resource(&cluster); + + if delta_resource_tx.send(ServerAction::Add(cluster_resource.clone())).await.is_err() { + break; + }; + tokio::time::sleep(Duration::from_secs(5)).await; + let listener = resources::create_listener( + &listener_id, + "192.168.1.10:8000".parse().expect("we really should panic here if this is wrong"), + CodecType::Http1, + vec!["*".to_owned(), "example.com".to_owned()], + vec![(cluster_id.clone(), 1)], + ); + let listener_resource = resources::create_listener_resource(&listener); + info!("Adding listener {listener_resource:?}"); + if delta_resource_tx.send(ServerAction::Add(listener_resource)).await.is_err() { + break; + }; + tokio::time::sleep(Duration::from_secs(15)).await; + + info!("Removing cluster {cluster_id}"); + if delta_resource_tx.send(ServerAction::Remove(cluster_resource)).await.is_err() { + break; + }; + tokio::time::sleep(Duration::from_secs(5)).await; + let listener = resources::create_listener( + &listener_id, + "192.168.1.10:8000".parse().expect("we really should 
panic here if this is wrong"), + CodecType::Http1, + vec!["*".to_owned(), "example.com".to_owned()], + vec![(cluster_id, 1)], + ); + let listener_resource = resources::create_listener_resource(&listener); + info!("Removing listener {listener_resource:?}"); + if delta_resource_tx.send(ServerAction::Remove(listener_resource)).await.is_err() { + break; + }; + } + }); + + let _ = grpc_server.into_future().await; + Ok(()) +} diff --git a/orion-xds/examples/server_routes_and_loads.rs b/orion-xds/examples/server_routes_and_loads.rs new file mode 100644 index 00000000..eab08b0e --- /dev/null +++ b/orion-xds/examples/server_routes_and_loads.rs @@ -0,0 +1,73 @@ +use std::{future::IntoFuture, time::Duration}; + +use orion_xds::xds::{ + resources, + server::{start_aggregate_server, ServerAction}, +}; +use tracing::info; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::registry() + .with(tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info, orion_xds=debug".into())) + .with(tracing_subscriber::fmt::layer()) + .init(); + + let (delta_resource_tx, delta_resources_rx) = tokio::sync::mpsc::channel(100); + let (_stream_resource_tx, stream_resources_rx) = tokio::sync::mpsc::channel(100); + let addr = "127.0.0.1:50051".parse()?; + + let grpc_server = tokio::spawn(async move { + info!("Server started"); + let res = start_aggregate_server(addr, delta_resources_rx, stream_resources_rx).await; + info!("Server stopped {res:?}"); + }); + tokio::time::sleep(std::time::Duration::from_secs(10)).await; + + let _xds_resource_producer = tokio::spawn(async move { + // needs to match ../orion-proxy/conf/orion-bootstap-minimal.yaml + let cluster_id = "cluster_http".to_owned(); + let route_id = "rds_route".to_owned(); + + let cla = resources::create_cluster_load_assignment( + &cluster_id, + "127.0.0.1:4001".parse().expect("We should panic here alright"), + 5, + ); + info!("Adding Cluster Load Assignment for cluster {cluster_id}"); + let load_assigment_resource = resources::create_load_assignment_resource(&cluster_id, &cla); + + if delta_resource_tx.send(ServerAction::Add(load_assigment_resource.clone())).await.is_err() { + return; + }; + tokio::time::sleep(Duration::from_secs(5)).await; + + info!("Adding Route configuration {route_id}"); + let route_configuration = + resources::create_route_resource(&route_id, vec!["*".to_owned()], "/".to_owned(), cluster_id.clone()); + let route_configuration_resource = + resources::create_route_configuration_resource(&route_id, &route_configuration); + + if delta_resource_tx.send(ServerAction::Add(route_configuration_resource.clone())).await.is_err() { + return; + }; + + tokio::time::sleep(Duration::from_secs(15)).await; + + info!("Removing cluster load assignment {cluster_id}"); + if delta_resource_tx.send(ServerAction::Remove(load_assigment_resource)).await.is_err() { + return; + }; + tokio::time::sleep(Duration::from_secs(5)).await; + + info!("Removing route configuration {route_id}"); + if delta_resource_tx.send(ServerAction::Remove(route_configuration_resource)).await.is_err() { + return; + }; + tokio::time::sleep(Duration::from_secs(5)).await; + }); + + let _ = grpc_server.into_future().await; + Ok(()) +} diff --git a/orion-xds/examples/server_secret_rotation.rs b/orion-xds/examples/server_secret_rotation.rs new file mode 100644 index 00000000..5f100f52 --- /dev/null +++ b/orion-xds/examples/server_secret_rotation.rs @@ -0,0 +1,89 @@ +use 
std::{future::IntoFuture, time::Duration}; + +use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::core::v3::{data_source::Specifier, DataSource}, + extensions::transport_sockets::tls::v3::{secret, CertificateValidationContext}, +}; +use orion_xds::xds::{ + resources, + server::{start_aggregate_server, ServerAction}, +}; +use tracing::info; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::registry() + .with(tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info, orion_xds=debug".into())) + .with(tracing_subscriber::fmt::layer()) + .init(); + + let (delta_resource_tx, delta_resources_rx) = tokio::sync::mpsc::channel(100); + let (_stream_resource_tx, stream_resources_rx) = tokio::sync::mpsc::channel(100); + let addr = "127.0.0.1:50051".parse()?; + + let grpc_server = tokio::spawn(async move { + info!("Server started"); + let res = start_aggregate_server(addr, delta_resources_rx, stream_resources_rx).await; + info!("Server stopped {res:?}"); + }); + tokio::time::sleep(std::time::Duration::from_secs(10)).await; + + let _xds_resource_producer = tokio::spawn(async move { + // secret names needs to match ../orion-proxy/conf/orion-bootstap-sds.yaml + // we are trying to change secret beefcake_ca and listener_beefcake_ca to point to a different cert stores + // initially the proxy should terminate tls connection + // once the listener_beefcake_ca secret is rotated then the proxy should return 502 error as it can't set up tls to upstream + // once the beefcake_ca is rotated the proxy will return response from upstream + + // run curl like this + // ng3-proxy$ curl -vi --cacert test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem --cert test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem --key test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem --resolve athlone_2.beefcake.com:8443:127.0.0.1 https://athlone_2.beefcake.com:8443 + + let secret_id = "listener_beefcake_ca"; + let validation_context = CertificateValidationContext { + trusted_ca: Some(DataSource { + specifier: Some(Specifier::Filename( + //"./test_certs/deadbeefCA-gathered/deadbeef.intermediate.ca-chain.cert.pem" + "./test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem".to_owned(), + )), + ..Default::default() + }), + ..Default::default() + }; + let secret_type = secret::Type::ValidationContext(validation_context); + let secret = resources::create_secret(secret_id, secret_type); + info!("Adding downstream secret {secret_id}"); + let secret_resource = resources::create_secret_resource(secret_id, &secret); + + if delta_resource_tx.send(ServerAction::Add(secret_resource.clone())).await.is_err() { + return; + }; + + tokio::time::sleep(Duration::from_secs(15)).await; + + let secret_id = "beefcake_ca"; + let validation_context = CertificateValidationContext { + trusted_ca: Some(DataSource { + specifier: Some(Specifier::Filename( + //"./test_certs/deadbeefCA-gathered/deadbeef.intermediate.ca-chain.cert.pem" + "./test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem".to_owned(), + )), + ..Default::default() + }), + ..Default::default() + }; + let secret_type = secret::Type::ValidationContext(validation_context); + let secret = resources::create_secret(secret_id, secret_type); + info!("Adding upstream secret {secret_id}"); + let secret_resource = resources::create_secret_resource(secret_id, &secret); + + if 
delta_resource_tx.send(ServerAction::Add(secret_resource.clone())).await.is_err() { + return; + }; + + tokio::time::sleep(Duration::from_secs(15)).await; + }); + + let _ = grpc_server.into_future().await; + Ok(()) +} diff --git a/orion-xds/examples/server_secret_rotation_simple.rs b/orion-xds/examples/server_secret_rotation_simple.rs new file mode 100644 index 00000000..d4da5b5a --- /dev/null +++ b/orion-xds/examples/server_secret_rotation_simple.rs @@ -0,0 +1,58 @@ +use std::future::IntoFuture; + +use orion_data_plane_api::envoy_data_plane_api::envoy::{ + config::core::v3::{data_source::Specifier, DataSource}, + extensions::transport_sockets::tls::v3::{secret, CertificateValidationContext}, +}; +use orion_xds::xds::{resources, server::start_aggregate_server}; +use tracing::info; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::registry() + .with(tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info, orion_xds=debug".into())) + .with(tracing_subscriber::fmt::layer()) + .init(); + + let (_, delta_resources_rx) = tokio::sync::mpsc::channel(100); + let (_stream_resource_tx, stream_resources_rx) = tokio::sync::mpsc::channel(100); + let addr = "127.0.0.1:50051".parse()?; + + let grpc_server = tokio::spawn(async move { + info!("Server started"); + let res = start_aggregate_server(addr, delta_resources_rx, stream_resources_rx).await; + info!("Server stopped {res:?}"); + }); + tokio::time::sleep(std::time::Duration::from_secs(10)).await; + + let var_name = async move { + // the secret name needs to match ../orion-proxy/conf/orion-bootstap-sds-simple.yaml + // we are trying to change secret beefcake_ca to point to a different cert store + // initially the proxy should return 502 error as it can't set up tls to upstream + // once the secret is rotated the proxy will return response from upstream + + // run curl like this + // ng3-proxy$ curl -vi --cacert test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem --cert test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem --key test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem --resolve athlone_2.beefcake.com:8443:127.0.0.1 https://athlone_2.beefcake.com:8443 + + let secret_id = "beefcake_ca"; + let validation_context = CertificateValidationContext { + trusted_ca: Some(DataSource { + specifier: Some(Specifier::Filename( + //"./test_certs/deadbeefCA-gathered/deadbeef.intermediate.ca-chain.cert.pem" + "./test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem".to_owned(), + )), + ..Default::default() + }), + ..Default::default() + }; + let secret_type = secret::Type::ValidationContext(validation_context); + let secret = resources::create_secret(secret_id, secret_type); + info!("Adding upstream secret {secret_id}"); + let _secret_resource = resources::create_secret_resource(secret_id, &secret); + }; + let _xds_resource_producer = tokio::spawn(var_name); + + let _ = grpc_server.into_future().await; + Ok(()) +} diff --git a/orion-xds/src/lib.rs b/orion-xds/src/lib.rs new file mode 100644 index 00000000..fb14713d --- /dev/null +++ b/orion-xds/src/lib.rs @@ -0,0 +1,88 @@ +// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd +// SPDX-License-Identifier: Apache-2.0 +// +// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub mod xds;
+
+pub use crate::xds::model::XdsError;
+use crate::xds::{
+    bindings::AggregatedDiscoveryType,
+    client::{DeltaDiscoveryClient, DiscoveryClientBuilder, RETRY_INTERVAL},
+};
+use http::{Request, Response};
+use orion_configuration::config::bootstrap::Node;
+use orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::aggregated_discovery_service_client::AggregatedDiscoveryServiceClient;
+use orion_data_plane_api::envoy_data_plane_api::tonic;
+use tonic::body::BoxBody;
+use tonic::codegen::StdError as TonicError;
+use tonic::transport::{Channel, Endpoint};
+use tower::Service;
+use tracing::info;
+use xds::client::{DeltaClientBackgroundWorker, DeltaDiscoverySubscriptionManager};
+
+pub mod grpc_deps {
+    pub use orion_data_plane_api::envoy_data_plane_api::tonic::body::boxed as to_grpc_body;
+    pub use orion_data_plane_api::envoy_data_plane_api::tonic::body::BoxBody as GrpcBody;
+    pub use orion_data_plane_api::envoy_data_plane_api::tonic::codegen::StdError as Error;
+    pub use orion_data_plane_api::envoy_data_plane_api::tonic::{Response, Status};
+    pub use orion_data_plane_api::envoy_data_plane_api::tonic_health;
+}
+
+pub async fn start_aggregate_client(
+    node: Node,
+    configuration_service_address: tonic::transport::Uri,
+) -> Result<
+    (
+        DeltaClientBackgroundWorker<AggregatedDiscoveryType<Channel>>,
+        DeltaDiscoveryClient,
+        DeltaDiscoverySubscriptionManager,
+    ),
+    XdsError,
+> {
+    info!("Starting xDS client: {:?}", configuration_service_address);
+    let endpoint = Endpoint::from(configuration_service_address);
+    let discovery_client = loop {
+        let endpoint = endpoint.clone();
+        if let Ok(client) = AggregatedDiscoveryServiceClient::connect(endpoint).await {
+            break client;
+        }
+        info!("Server doesn't exist yet... retrying in {RETRY_INTERVAL:?}");
+        tokio::time::sleep(RETRY_INTERVAL).await;
+    };
+
+    let aggregated_discovery_service_client = AggregatedDiscoveryType { underlying_client: discovery_client };
+
+    DiscoveryClientBuilder::new(node, aggregated_discovery_service_client).build()
+}
+
+pub fn start_aggregate_client_no_retry_loop<C>(
+    node: Node,
+    channel: C,
+) -> Result<
+    (DeltaClientBackgroundWorker<AggregatedDiscoveryType<C>>, DeltaDiscoveryClient, DeltaDiscoverySubscriptionManager),
+    XdsError,
+>
+where
+    C: Service<Request<BoxBody>, Response = Response<BoxBody>, Error = TonicError> + Send,
+    C::Future: Send,
+{
+    let underlying_client = AggregatedDiscoveryServiceClient::new(channel);
+    let aggregated_discovery_service_client = AggregatedDiscoveryType { underlying_client };
+    DiscoveryClientBuilder::new(node, aggregated_discovery_service_client).build()
+}
diff --git a/orion-xds/src/xds/bindings.rs b/orion-xds/src/xds/bindings.rs
new file mode 100644
index 00000000..7d69f7c1
--- /dev/null
+++ b/orion-xds/src/xds/bindings.rs
@@ -0,0 +1,239 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
diff --git a/orion-xds/src/xds/bindings.rs b/orion-xds/src/xds/bindings.rs
new file mode 100644
index 00000000..7d69f7c1
--- /dev/null
+++ b/orion-xds/src/xds/bindings.rs
@@ -0,0 +1,239 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::{future::Future, pin::Pin};
+
+use model::TypeUrl;
+use orion_data_plane_api::envoy_data_plane_api::envoy::service::{
+    cluster::v3::cluster_discovery_service_client::ClusterDiscoveryServiceClient,
+    discovery::v3::{
+        aggregated_discovery_service_client::AggregatedDiscoveryServiceClient, DeltaDiscoveryRequest,
+        DeltaDiscoveryResponse, DiscoveryRequest, DiscoveryResponse,
+    },
+    endpoint::v3::endpoint_discovery_service_client::EndpointDiscoveryServiceClient,
+    listener::v3::listener_discovery_service_client::ListenerDiscoveryServiceClient,
+    route::v3::route_discovery_service_client::RouteDiscoveryServiceClient,
+    secret::v3::secret_discovery_service_client::SecretDiscoveryServiceClient,
+};
+use orion_data_plane_api::envoy_data_plane_api::tonic;
+use tokio_stream::Stream;
+use tonic::codegen::StdError;
+use tonic::transport::Channel;
+
+use super::model;
+
+pub type DeltaDiscoveryResponseFuture<'a> = Pin<
+    Box<
+        dyn Future<
+                Output = std::result::Result<
+                    tonic::Response<tonic::Streaming<DeltaDiscoveryResponse>>,
+                    tonic::Status,
+                >,
+            > + Send
+            + 'a,
+    >,
+>;
+
+pub type DiscoveryResponseFuture<'a> = Pin<
+    Box<
+        dyn Future<
+                Output = std::result::Result<
+                    tonic::Response<tonic::Streaming<DiscoveryResponse>>,
+                    tonic::Status,
+                >,
+            > + Send
+            + 'a,
+    >,
>;
+
+/// Abstracts over the variation in generated xDS clients
+pub trait TypedXdsBinding {
+    fn type_url() -> Option<TypeUrl>;
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaDiscoveryResponseFuture<'_>;
+    fn stream_request(
+        &mut self,
+        request: impl Stream<Item = DiscoveryRequest> + Send + 'static,
+    ) -> DiscoveryResponseFuture<'_>;
+}
+
+/// Handle to ADS client
+#[derive(Debug)]
+pub struct AggregatedDiscoveryType<C> {
+    pub underlying_client: AggregatedDiscoveryServiceClient<C>,
+}
+
+impl<C> TypedXdsBinding for AggregatedDiscoveryType<C>
+where
+    C: tonic::client::GrpcService<tonic::body::BoxBody> + Send,
+    C::Error: Into<StdError>,
+    C::ResponseBody: tonic::codegen::Body<Data = tonic::codegen::Bytes> + std::marker::Send + 'static,
+    <C::ResponseBody as tonic::codegen::Body>::Error: Into<StdError> + std::marker::Send,
+    C::Future: Send,
+{
+    fn type_url() -> Option<TypeUrl> {
+        None
+    }
+
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaDiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.delta_aggregated_resources(request))
+    }
+
+    fn stream_request(
+        &mut self,
+        request: impl Stream<Item = DiscoveryRequest> + Send + 'static,
+    ) -> DiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.stream_aggregated_resources(request))
+    }
+}
+
+#[derive(Debug)]
+/// Handle to CDS client
+pub struct ClusterDiscoveryType {
+    pub underlying_client: ClusterDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for ClusterDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::Cluster)
+    }
+
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaDiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.delta_clusters(request))
+    }
+
+    fn stream_request(
+        &mut self,
+        request: impl Stream<Item = DiscoveryRequest> + Send + 'static,
+    ) -> DiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.stream_clusters(request))
+    }
+}
+/// Handle to LDS Client
+#[derive(Debug)]
+pub struct ListenerDiscoveryType {
+    underlying_client: ListenerDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for ListenerDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::Listener)
+    }
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaDiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.delta_listeners(request))
+    }
+
+    fn stream_request(
+        &mut self,
+        request: impl Stream<Item = DiscoveryRequest> + Send + 'static,
+    ) -> DiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.stream_listeners(request))
+    }
+}
+
+/// Handle to RDS Client
+#[derive(Debug)]
+pub struct RouteDiscoveryType {
+    underlying_client: RouteDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for RouteDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::RouteConfiguration)
+    }
+
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaDiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.delta_routes(request))
+    }
+
+    fn stream_request(
+        &mut self,
+        request: impl Stream<Item = DiscoveryRequest> + Send + 'static,
+    ) -> DiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.stream_routes(request))
+    }
+}
+
+/// Handle to EDS Client
+#[derive(Debug)]
+pub struct EndpointDiscoveryType {
+    underlying_client: EndpointDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for EndpointDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::ClusterLoadAssignment)
+    }
+
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaDiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.delta_endpoints(request))
+    }
+
+    fn stream_request(
+        &mut self,
+        request: impl Stream<Item = DiscoveryRequest> + Send + 'static,
+    ) -> DiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.stream_endpoints(request))
+    }
+}
+
+/// Handle to SDS Client
+#[derive(Debug)]
+pub struct SecretsDiscoveryType {
+    underlying_client: SecretDiscoveryServiceClient<Channel>,
+}
+
+impl TypedXdsBinding for SecretsDiscoveryType {
+    fn type_url() -> Option<TypeUrl> {
+        Some(TypeUrl::Secret)
+    }
+
+    fn delta_request(
+        &mut self,
+        request: impl Stream<Item = DeltaDiscoveryRequest> + Send + 'static,
+    ) -> DeltaDiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.delta_secrets(request))
+    }
+
+    fn stream_request(
+        &mut self,
+        request: impl Stream<Item = DiscoveryRequest> + Send + 'static,
+    ) -> DiscoveryResponseFuture<'_> {
+        Box::pin(self.underlying_client.stream_secrets(request))
+    }
+}
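A sketch of how a single-type binding is used with the builder defined in client.rs below; the endpoint address and resource name are placeholders, and `node` is assumed from the caller's configuration:

    let channel = Channel::from_static("http://127.0.0.1:50051").connect().await?;
    let binding = ClusterDiscoveryType { underlying_client: ClusterDiscoveryServiceClient::new(channel) };
    let (worker, client, subscriptions) = DiscoveryClientBuilder::new(node, binding)
        .subscribe_resource_name("my-cluster".to_owned()) // valid because type_url() is Some(TypeUrl::Cluster)
        .build()?;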
diff --git a/orion-xds/src/xds/client.rs b/orion-xds/src/xds/client.rs
new file mode 100644
index 00000000..d1ee1e0d
--- /dev/null
+++ b/orion-xds/src/xds/client.rs
@@ -0,0 +1,464 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use super::{
+    bindings,
+    model::{RejectedConfig, ResourceId, ResourceVersion, TypeUrl, XdsError, XdsResourcePayload, XdsResourceUpdate},
+};
+use orion_configuration::config::bootstrap::Node;
+use orion_data_plane_api::envoy_data_plane_api::{
+    envoy::{
+        config::core::v3::Node as EnvoyNode,
+        service::discovery::v3::{DeltaDiscoveryRequest, DeltaDiscoveryResponse},
+    },
+    google::rpc::Status,
+    tonic,
+};
+use std::{
+    collections::{HashMap, HashSet},
+    time::Duration,
+};
+use tokio::{
+    sync::{mpsc, oneshot},
+    time,
+};
+use tracing::{debug, info, warn};
+
+pub const INITIAL_BACKOFF: Duration = Duration::from_secs(1);
+pub const MAX_BACKOFF: Duration = Duration::from_secs(20);
+pub const BACKOFF_INTERVAL: Duration = Duration::from_secs(2);
+pub const RETRY_INTERVAL: Duration = Duration::from_secs(5);
+pub const ACK_TIMEOUT: Duration = Duration::from_secs(5);
+
+pub struct DiscoveryClientBuilder<C> {
+    node: Node,
+    client_binding: C,
+    initial_subscriptions: HashMap<TypeUrl, HashSet<ResourceId>>,
+    error: Option<String>,
+}
+
+impl<C> DiscoveryClientBuilder<C>
+where
+    C: bindings::TypedXdsBinding,
+{
+    pub fn new(node: Node, client: C) -> DiscoveryClientBuilder<C> {
+        DiscoveryClientBuilder { node, client_binding: client, initial_subscriptions: HashMap::new(), error: None }
+    }
+
+    #[must_use]
+    pub fn subscribe_resource_name(mut self, resource_id: ResourceId) -> Self {
+        if let Some(type_url) = C::type_url() {
+            self = self.subscribe_resource_name_by_typeurl(resource_id, type_url);
+        } else {
+            self.error = Some("subscribe only works if typed binding provides a compatible type_url".to_owned());
+        }
+        self
+    }
+
+    fn subscribe_resource_name_by_typeurl(mut self, resource_id: ResourceId, type_url: TypeUrl) -> Self {
+        let configured_type_url = C::type_url();
+        if configured_type_url.is_none() || configured_type_url.is_some_and(|type_is_set| type_is_set == type_url) {
+            self.initial_subscriptions.entry(type_url).or_default().insert(resource_id);
+        } else {
+            self.error = Some("can only subscribe by type_url when using a compatible typed binding".to_owned());
+        }
+        self
+    }
+
+    pub fn build(
+        self,
+    ) -> Result<(DeltaClientBackgroundWorker<C>, DeltaDiscoveryClient, DeltaDiscoverySubscriptionManager), XdsError>
+    {
+        if let Some(err) = self.error {
+            Err(XdsError::BuilderFailed(err))
+        } else {
+            let (subscription_updates_tx, subscription_updates_rx) = mpsc::channel::<SubscriptionEvent>(100);
+            let (resource_updates_tx, resource_updates_rx) = mpsc::channel::<XdsUpdateEvent>(100);
+            Ok((
+                DeltaClientBackgroundWorker {
+                    node: self.node,
+                    client_binding: self.client_binding,
+                    initial_subscriptions: self.initial_subscriptions,
+                    subscriptions_rx: subscription_updates_rx,
+                    resources_tx: resource_updates_tx,
+                },
+                DeltaDiscoveryClient { resources_rx: resource_updates_rx },
+                DeltaDiscoverySubscriptionManager { subscriptions_tx: subscription_updates_tx },
+            ))
+        }
+    }
+}
+
+/// Incremental client that speaks the delta variant of the xDS protocol.
+/// Use it to consume xDS configuration updates asynchronously and to modify resource subscriptions.
+
+#[derive(Debug)]
+pub struct DeltaDiscoveryClient {
+    resources_rx: mpsc::Receiver<XdsUpdateEvent>,
+}
+
+#[derive(Debug)]
+pub struct DeltaDiscoverySubscriptionManager {
+    subscriptions_tx: mpsc::Sender<SubscriptionEvent>,
+}
+
+impl DeltaDiscoveryClient {
+    pub async fn recv(&mut self) -> Option<XdsUpdateEvent> {
+        self.resources_rx.recv().await
+    }
+}
+
+impl DeltaDiscoverySubscriptionManager {
+    pub async fn subscribe(&self, resource_id: ResourceId, type_url: TypeUrl) -> Result<(), XdsError> {
+        Ok(self.subscriptions_tx.send(SubscriptionEvent::Subscribe(type_url, resource_id)).await?)
+    }
+
+    pub async fn unsubscribe(&self, resource_id: ResourceId, type_url: TypeUrl) -> Result<(), XdsError> {
+        Ok(self.subscriptions_tx.send(SubscriptionEvent::Unsubscribe(type_url, resource_id)).await?)
+    }
+}
+
+#[derive(Debug)]
+pub struct XdsUpdateEvent {
+    pub updates: Vec<XdsResourceUpdate>,
+    pub ack_channel: oneshot::Sender<Vec<RejectedConfig>>,
+}
+
+#[derive(Clone, Debug)]
+pub enum SubscriptionEvent {
+    Subscribe(TypeUrl, ResourceId),
+    Unsubscribe(TypeUrl, ResourceId),
+}
+
+/// Background worker that handles interactions with remote xDS services
+#[derive(Debug)]
+pub struct DeltaClientBackgroundWorker<C> {
+    node: Node,
+    client_binding: C,
+    initial_subscriptions: HashMap<TypeUrl, HashSet<ResourceId>>,
+    subscriptions_rx: mpsc::Receiver<SubscriptionEvent>,
+    resources_tx: mpsc::Sender<XdsUpdateEvent>,
+}
+
+impl<C: bindings::TypedXdsBinding> DeltaClientBackgroundWorker<C> {
+    pub async fn run(&mut self) -> Result<(), XdsError> {
+        let mut connection_id = 0;
+
+        let mut state = DiscoveryClientState {
+            backoff: INITIAL_BACKOFF,
+            tracked: HashMap::new(),
+            subscriptions: self.initial_subscriptions.clone(),
+        };
+        loop {
+            connection_id += 1;
+            debug!(connection_id, "starting xDS (re)connect cycle {:?}", state.backoff);
+            self.persistently_connect(&mut state).await;
+        }
+    }
+}
+
+#[derive(Debug)]
+struct DiscoveryClientState {
+    backoff: Duration,
+    tracked: HashMap<TypeUrl, HashMap<ResourceId, ResourceVersion>>,
+    subscriptions: HashMap<TypeUrl, HashSet<ResourceId>>,
+}
+
+impl DiscoveryClientState {
+    fn reset_backoff(&mut self) {
+        debug!("XDS client connection backoff has been reset");
+        self.backoff = INITIAL_BACKOFF;
+    }
+}
+
+impl<C: bindings::TypedXdsBinding> DeltaClientBackgroundWorker<C> {
+    async fn persistently_connect(&mut self, state: &mut DiscoveryClientState) {
+        match self.stream_resources(state).await {
+            Err(ref e @ XdsError::GrpcStatus(ref status)) => {
+                let backoff = std::cmp::min(MAX_BACKOFF, state.backoff * 2);
+                let err_detail = e.to_string();
+                warn!("xDS client error: {err_detail:?}");
+                if status.code() == tonic::Code::Unknown
+                    || status.code() == tonic::Code::Cancelled
+                    || status.code() == tonic::Code::DeadlineExceeded
+                    || status.code() == tonic::Code::Unavailable
+                {
+                    warn!("xDS client terminated: {}, retrying in {:?}", err_detail, backoff);
+                } else {
+                    warn!("xDS client interrupted: {}, retrying in {:?}", err_detail, backoff);
+                }
+                tokio::time::sleep(backoff).await;
+                state.backoff = backoff;
+            },
+            Err(e) => {
+                let backoff = std::cmp::min(MAX_BACKOFF, state.backoff * 2);
+                let backoff_slowly = backoff + BACKOFF_INTERVAL;
+                warn!("xDS client error: {:?}, retrying in {:?}", e, backoff_slowly);
+                tokio::time::sleep(backoff_slowly).await;
+                state.backoff = backoff_slowly;
+            },
+            Ok(()) => {
+                warn!("xDS client closed");
+            },
+        }
+    }
+
+    async fn stream_resources(&mut self, state: &mut DiscoveryClientState) -> Result<(), XdsError> {
+        let (discovery_requests_tx, mut discovery_requests_rx) = mpsc::channel::<DeltaDiscoveryRequest>(100);
+
+        let resource_types = match C::type_url() {
+            Some(type_url) => vec![type_url],
+            _ => vec![
+                TypeUrl::Secret,
+                TypeUrl::ClusterLoadAssignment,
+                TypeUrl::Cluster,
+                TypeUrl::RouteConfiguration,
+                TypeUrl::Listener,
+            ],
+        };
+        let initial_requests: Vec<DeltaDiscoveryRequest> = resource_types
+            .iter()
+            .map(|resource_type| {
+                let subscriptions = state.subscriptions.get(resource_type).cloned().unwrap_or_default();
+                let already_tracked: HashMap<ResourceId, ResourceVersion> =
+                    state.tracked.get(resource_type).cloned().unwrap_or_default();
+                DeltaDiscoveryRequest {
+                    node: Some(EnvoyNode { id: self.node.id.clone().into_string(), ..Default::default() }),
+                    type_url: resource_type.to_string(),
+                    initial_resource_versions: already_tracked,
+                    resource_names_subscribe: subscriptions.into_iter().collect(),
+                    ..Default::default()
+                }
+            })
+            .collect();
+
+        let outbound_requests = async_stream::stream! {
+            for request in initial_requests {
+                info!(
+                    "sending initial discovery request {request:?}",
+                );
+
+                yield request;
+            }
+            while let Some(message) = discovery_requests_rx.recv().await {
+                info!(
+                    "sending subsequent discovery request {message:?}",
+                );
+                yield message
+            }
+            warn!("outbound discovery request stream has ended!");
+        };
+
+        let mut response_stream =
+            self.client_binding.delta_request(outbound_requests).await.map_err(XdsError::GrpcStatus)?.into_inner();
+        info!("xDS stream established");
+        state.reset_backoff();
+
+        loop {
+            tokio::select! {
+                Some(event) = self.subscriptions_rx.recv() => {
+                    self.process_subscription_event(event, state, &discovery_requests_tx).await;
+                }
+                discovered = response_stream.message() => {
+                    let payload = discovered?;
+                    let discovery_response = payload.ok_or(XdsError::UnknownResourceType("empty payload received".to_owned()))?;
+                    self.process_and_acknowledge(discovery_response, &discovery_requests_tx, state).await?;
+                },
+                else => {
+                    warn!("All channels are closed...exiting");
+                    return Ok(())
+                }
+            }
+        }
+    }
+
+    async fn process_subscription_event(
+        &self,
+        event: SubscriptionEvent,
+        state: &mut DiscoveryClientState,
+        discovery_requests_tx: &tokio::sync::mpsc::Sender<DeltaDiscoveryRequest>,
+    ) {
+        match event {
+            SubscriptionEvent::Subscribe(type_url, resource_id) => {
+                debug!(type_url = type_url.to_string(), resource_id, "processing new subscription");
+                let is_new = state.subscriptions.entry(type_url).or_default().insert(resource_id.clone());
+                if is_new {
+                    if let Err(err) = discovery_requests_tx
+                        .send(DeltaDiscoveryRequest {
+                            node: Some(EnvoyNode { id: self.node.id.clone().into_string(), ..Default::default() }),
+                            type_url: type_url.to_string(),
+                            resource_names_subscribe: vec![resource_id],
+                            ..Default::default()
+                        })
+                        .await
+                    {
+                        warn!("problems updating subscription: {:?}", err);
+                    }
+                }
+            },
+            SubscriptionEvent::Unsubscribe(type_url, resource_id) => {
+                debug!(type_url = type_url.to_string(), resource_id, "processing unsubscribe");
+                let was_subscribed = state.subscriptions.entry(type_url).or_default().remove(resource_id.as_str());
+                if was_subscribed {
+                    if let Err(err) = discovery_requests_tx
+                        .send(DeltaDiscoveryRequest {
+                            node: Some(EnvoyNode { id: self.node.id.clone().into_string(), ..Default::default() }),
+                            type_url: type_url.to_string(),
+                            resource_names_unsubscribe: vec![resource_id],
+                            ..Default::default()
+                        })
+                        .await
+                    {
+                        warn!("problems updating subscription: {:?}", err);
+                    }
+                }
+            },
+        }
+    }
+
+    async fn process_and_acknowledge(
+        &mut self,
+        response: DeltaDiscoveryResponse,
+        acknowledgments_tx: &mpsc::Sender<DeltaDiscoveryRequest>,
+        state: &mut DiscoveryClientState,
+    ) -> Result<(), XdsError> {
+        let type_url = TypeUrl::try_from(response.type_url.as_str())?;
+        let nonce = response.nonce.clone();
+        info!(type_url = type_url.to_string(), size = response.resources.len(), "received config resources from xDS");
+
+        let (updates, mut pending_update_versions) = Self::map_updates(state, response, type_url);
+        let (internal_ack_tx, internal_ack_rx) = oneshot::channel::<Vec<RejectedConfig>>();
+        let notification = XdsUpdateEvent { updates, ack_channel: internal_ack_tx };
+        self.resources_tx
+            .send(notification)
+            .await
+            .map_err(|e: mpsc::error::SendError<XdsUpdateEvent>| XdsError::InternalProcessingError(e.to_string()))?;
+        tokio::select! {
+            ack = internal_ack_rx => {
+                match ack {
+                    Ok(rejected_configs) => {
+                        let error = if rejected_configs.is_empty() {
+                            debug!(
+                                type_url = type_url.to_string(),
+                                nonce,
+                                "sending ack response after processing",
+                            );
+                            let tracked_resources = state.tracked.entry(type_url).or_default();
+                            for (resource_id, resource_version) in pending_update_versions.drain() {
+                                tracked_resources.insert(resource_id, resource_version);
+                            }
+                            None
+                        } else {
+                            let error = rejected_configs
+                                .into_iter()
+                                .map(|reject| reject.to_string())
+                                .collect::<Vec<String>>()
+                                .join("; ");
+                            debug!(
+                                type_url = type_url.to_string(),
+                                error,
+                                nonce,
+                                "rejecting configs with nack response",
+                            );
+                            Some(Status {
+                                message: error,
+                                code: tonic::Code::InvalidArgument.into(),
+                                ..Default::default()
+                            })
+                        };
+                        if let Err(err) = acknowledgments_tx.send(DeltaDiscoveryRequest {
+                            type_url: type_url.to_string(),
+                            response_nonce: nonce,
+                            error_detail: error,
+                            ..Default::default()
+                        })
+                        .await
+                        {
+                            warn!("error sending xDS ack/nack upstream {:?}", err);
+                        }
+                    },
+                    Err(err) => {
+                        warn!("error reading internal ack/nack {:?}", err);
+                    },
+                }
+            }
+            () = time::sleep(ACK_TIMEOUT) => {
+                warn!("timed out while waiting to acknowledge config updates");
+                let error = pending_update_versions.into_keys()
+                    .collect::<Vec<ResourceId>>()
+                    .join("; ");
+                let error = Status {
+                    message: error,
+                    ..Default::default()
+                };
+                let _ = acknowledgments_tx.send(DeltaDiscoveryRequest {
+                    type_url: type_url.to_string(),
+                    response_nonce: nonce,
+                    error_detail: Some(error),
+                    ..Default::default()
+                })
+                .await;
+            }
+        }
+
+        Ok(())
+    }
+
+    fn map_updates(
+        state: &mut DiscoveryClientState,
+        response: DeltaDiscoveryResponse,
+        type_url: TypeUrl,
+    ) -> (Vec<XdsResourceUpdate>, HashMap<ResourceId, ResourceVersion>) {
+        let for_removal: Vec<ResourceId> = response
+            .removed_resources
+            .iter()
+            .map(|resource_id| {
+                debug!("received delete for config resource {}", resource_id);
+                if let Some(resources) = state.tracked.get_mut(&type_url) {
+                    resources.remove(resource_id);
+                }
+                resource_id.clone()
+            })
+            .collect();
+
+        let mut pending_update_versions = HashMap::<ResourceId, ResourceVersion>::new();
+
+        let updates = response
+            .resources
+            .into_iter()
+            .filter_map(|resource| {
+                let resource_id = resource.name.clone();
+                let resource_version = resource.version.clone();
+                let decoded = XdsResourcePayload::try_from((resource, type_url));
+                if decoded.is_err() {
+                    warn!("problem decoding config update for {} : error {:?}", resource_id, decoded.as_ref().err());
+                } else {
+                    pending_update_versions.insert(resource_id.clone(), resource_version);
+                    debug!("decoded config update for resource {resource_id}");
+                }
+                decoded.ok().map(|value| XdsResourceUpdate::Update(resource_id.clone(), value))
+            })
+            .chain(for_removal.into_iter().map(|resource_id| XdsResourceUpdate::Remove(resource_id, type_url)))
+            .collect();
+
+        (updates, pending_update_versions)
+    }
+}
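The worker blocks (up to ACK_TIMEOUT) on the oneshot ack_channel of each XdsUpdateEvent, so a consumer is expected to answer every event. A minimal consumer loop, where `apply_update` is a placeholder for the caller's own logic returning Result<(), orion_error::Error>:

    while let Some(XdsUpdateEvent { updates, ack_channel }) = client.recv().await {
        let mut rejected = Vec::new();
        for update in updates {
            let id = update.id();
            if let Err(err) = apply_update(update) {
                rejected.push(RejectedConfig::from((id, err)));
            }
        }
        // an empty Vec acks the response; a non-empty Vec nacks it with the collected reasons
        let _ = ack_channel.send(rejected);
    }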
diff --git a/orion-xds/src/xds/mod.rs b/orion-xds/src/xds/mod.rs
new file mode 100644
index 00000000..2a31dc8d
--- /dev/null
+++ b/orion-xds/src/xds/mod.rs
@@ -0,0 +1,26 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+pub mod bindings;
+pub mod client;
+pub mod model;
+
+pub mod resources;
+pub mod server;
diff --git a/orion-xds/src/xds/model.rs b/orion-xds/src/xds/model.rs
new file mode 100644
index 00000000..0e62e465
--- /dev/null
+++ b/orion-xds/src/xds/model.rs
@@ -0,0 +1,180 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use orion_configuration::config::{
+    cluster::Cluster, cluster::ClusterLoadAssignment, common::GenericError, listener::Listener,
+    network_filters::http_connection_manager::RouteConfiguration, secret::Secret,
+};
+use orion_data_plane_api::envoy_data_plane_api::{
+    envoy::{
+        config::{
+            cluster::v3::Cluster as EnvoyCluster, endpoint::v3::ClusterLoadAssignment as EnvoyClusterLoadAssignment,
+            listener::v3::Listener as EnvoyListener, route::v3::RouteConfiguration as EnvoyRouteConfiguration,
+        },
+        extensions::transport_sockets::tls::v3::Secret as EnvoySecret,
+        service::discovery::v3::Resource,
+    },
+    prost,
+    prost::Message,
+    tonic,
+};
+use serde::Deserialize;
+use std::{
+    fmt,
+    fmt::{Display, Formatter},
+};
+use thiserror::Error;
+use tokio::sync::mpsc;
+
+pub type ResourceId = String;
+pub type ResourceVersion = String;
+
+#[derive(Debug)]
+pub enum XdsResourceUpdate {
+    Update(ResourceId, XdsResourcePayload),
+    Remove(ResourceId, TypeUrl),
+}
+
+impl XdsResourceUpdate {
+    pub fn id(&self) -> ResourceId {
+        match self {
+            XdsResourceUpdate::Update(id, _) | XdsResourceUpdate::Remove(id, _) => id.to_string(),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub enum XdsResourcePayload {
+    Listener(ResourceId, Listener),
+    Cluster(ResourceId, Cluster),
+    Endpoints(ResourceId, ClusterLoadAssignment),
+    RouteConfiguration(ResourceId, RouteConfiguration),
+    Secret(ResourceId, Secret),
+}
+
+impl TryFrom<(Resource, TypeUrl)> for XdsResourcePayload {
+    type Error = XdsError;
+
+    fn try_from((resource, type_url): (Resource, TypeUrl)) -> Result<Self, Self::Error> {
+        let resource_id = resource.name;
+        resource.resource.ok_or(XdsError::MissingResource()).and_then(|res| match type_url {
+            TypeUrl::Listener => {
+                let decoded = EnvoyListener::decode(res.value.as_slice())?.try_into()?;
+                Ok(XdsResourcePayload::Listener(resource_id, decoded))
+            },
+            TypeUrl::Cluster => {
+                let decoded = EnvoyCluster::decode(res.value.as_slice())?.try_into()?;
+                Ok(XdsResourcePayload::Cluster(resource_id, decoded))
+            },
+            TypeUrl::RouteConfiguration => {
+                let decoded = EnvoyRouteConfiguration::decode(res.value.as_slice())?.try_into()?;
+                Ok(XdsResourcePayload::RouteConfiguration(resource_id, decoded))
+            },
+            TypeUrl::ClusterLoadAssignment => {
+                let decoded = EnvoyClusterLoadAssignment::decode(res.value.as_slice())?.try_into()?;
+                Ok(XdsResourcePayload::Endpoints(resource_id, decoded))
+            },
+            TypeUrl::Secret => {
+                let decoded = EnvoySecret::decode(res.value.as_slice())?.try_into()?;
+                Ok(XdsResourcePayload::Secret(resource_id, decoded))
+            },
+        })
+    }
+}
+
+#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Deserialize)]
+pub enum TypeUrl {
+    Listener,
+    Cluster,
+    RouteConfiguration,
+    ClusterLoadAssignment,
+    Secret,
+}
+
+impl fmt::Display for TypeUrl {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "{}",
+            match self {
+                TypeUrl::Listener => "type.googleapis.com/envoy.config.listener.v3.Listener".to_owned(),
+                TypeUrl::Cluster => "type.googleapis.com/envoy.config.cluster.v3.Cluster".to_owned(),
+                TypeUrl::RouteConfiguration =>
+                    "type.googleapis.com/envoy.config.route.v3.RouteConfiguration".to_owned(),
+                TypeUrl::ClusterLoadAssignment =>
+                    "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment".to_owned(),
+                TypeUrl::Secret => "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret".to_owned(),
+            }
+        )
+    }
+}
+
+impl TryFrom<&str> for TypeUrl {
+    type Error = XdsError;
+
+    fn try_from(type_url_string: &str) -> Result<Self, Self::Error> {
+        match type_url_string {
+            "type.googleapis.com/envoy.config.listener.v3.Listener" => Ok(TypeUrl::Listener),
+            "type.googleapis.com/envoy.config.cluster.v3.Cluster" => Ok(TypeUrl::Cluster),
+            "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" => Ok(TypeUrl::RouteConfiguration),
+            "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" => Ok(TypeUrl::ClusterLoadAssignment),
+            "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" => Ok(TypeUrl::Secret),
+            value => Err(XdsError::UnknownResourceType(value.to_owned())),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct RejectedConfig {
+    name: ResourceId,
+    reason: orion_error::Error,
+}
+impl<E: Into<orion_error::Error>> From<(ResourceId, E)> for RejectedConfig {
+    fn from(context: (ResourceId, E)) -> RejectedConfig {
+        RejectedConfig { name: context.0, reason: context.1.into() }
+    }
+}
+impl Display for RejectedConfig {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{}: {}", self.name, self.reason)
+    }
+}
+
+#[derive(Error, Debug)]
+pub enum XdsError {
+    #[error("gRPC error ({}): {}", .0.code(), .0.message())]
+    GrpcStatus(#[from] tonic::Status),
+    #[error("unknown resource type: {0}")]
+    UnknownResourceType(String),
+    #[error("error decoding xDS payload: {0}")]
+    Decode(#[from] prost::DecodeError),
+    #[error("malformed xDS payload, missing resource")]
+    MissingResource(),
+    #[error("a problem occurred during processing: {0}")]
+    InternalProcessingError(String),
+    #[error("cannot construct client: {0}")]
+    BuilderFailed(String),
+    #[error("failed to convert envoy type")]
+    ConversionError(#[from] GenericError),
+    #[error(transparent)]
+    Transport(#[from] tonic::transport::Error),
+    #[error("failed to push xDS subscription event to channel")]
+    SubscriptionFailure(#[from] mpsc::error::SendError<crate::xds::client::SubscriptionEvent>),
+}
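Display and TryFrom<&str> are intended as inverses, e.g.:

    assert_eq!(TypeUrl::Cluster.to_string(), "type.googleapis.com/envoy.config.cluster.v3.Cluster");
    assert_eq!(TypeUrl::try_from("type.googleapis.com/envoy.config.cluster.v3.Cluster").unwrap(), TypeUrl::Cluster);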
diff --git a/orion-xds/src/xds/resources/converters.rs b/orion-xds/src/xds/resources/converters.rs
new file mode 100644
index 00000000..4bd11a6a
--- /dev/null
+++ b/orion-xds/src/xds/resources/converters.rs
@@ -0,0 +1,45 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::net::SocketAddr;
+
+use orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::{
+    address, socket_address::PortSpecifier, SocketAddress,
+};
+
+pub struct SocketConverter;
+
+impl SocketConverter {
+    pub fn from(value: SocketAddr) -> address::Address {
+        let (protocol, ip, port) = match value {
+            SocketAddr::V4(s) => (0, s.ip().to_string(), u32::from(s.port())),
+            SocketAddr::V6(s) => (0, s.ip().to_string(), u32::from(s.port())),
+        };
+        let address = SocketAddress {
+            protocol,
+            address: ip,
+            port_specifier: Some(PortSpecifier::PortValue(port)),
+            resolver_name: String::new(),
+            ipv4_compat: false,
+            network_namespace_filepath: String::new(),
+        };
+        address::Address::SocketAddress(address)
+    }
+}
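SocketConverter maps a std SocketAddr onto the Envoy address proto, e.g.:

    let addr: std::net::SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let envoy_address = SocketConverter::from(addr);
    // envoy_address is an address::Address::SocketAddress with address "127.0.0.1"
    // and port_specifier Some(PortSpecifier::PortValue(8000))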
diff --git a/orion-xds/src/xds/resources/mod.rs b/orion-xds/src/xds/resources/mod.rs
new file mode 100644
index 00000000..ebe7dde3
--- /dev/null
+++ b/orion-xds/src/xds/resources/mod.rs
@@ -0,0 +1,431 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::net::SocketAddr;
+
+use futures::Stream;
+use orion_data_plane_api::envoy_data_plane_api::{
+    envoy::{
+        config::{
+            cluster::v3::{
+                cluster::{ClusterDiscoveryType, DiscoveryType, LbPolicy},
+                Cluster,
+            },
+            core::v3::{
+                socket_address::PortSpecifier, Address, Http1ProtocolOptions, Http2ProtocolOptions,
+                HttpProtocolOptions as ConfigHttpProtocolOptions, Node,
+            },
+            endpoint::v3::{
+                lb_endpoint::HostIdentifier, ClusterLoadAssignment, Endpoint, LbEndpoint, LocalityLbEndpoints,
+            },
+            listener::v3::{filter::ConfigType, Filter, FilterChain, Listener},
+            route::v3::{
+                header_matcher::HeaderMatchSpecifier, route::Action, route_action::ClusterSpecifier,
+                route_match::PathSpecifier, weighted_cluster::ClusterWeight, HeaderMatcher, Route, RouteAction,
+                RouteConfiguration, RouteMatch, VirtualHost, WeightedCluster,
+            },
+        },
+        extensions::{
+            filters::network::http_connection_manager::v3::{
+                http_connection_manager::{CodecType, RouteSpecifier},
+                HttpConnectionManager,
+            },
+            transport_sockets::tls::v3::{secret, Secret},
+            upstreams::http::v3::{
+                http_protocol_options::{
+                    explicit_http_config::ProtocolConfig, ExplicitHttpConfig, UpstreamProtocolOptions,
+                },
+                HttpProtocolOptions,
+            },
+        },
+        service::discovery::v3::{DeltaDiscoveryRequest, DiscoveryRequest, Resource},
+    },
+    google::protobuf::{Any, UInt32Value},
+    prost::Message,
+};
+use tokio_stream::StreamExt;
+
+use self::converters::SocketConverter;
+use crate::xds::model::TypeUrl;
+
+pub mod converters;
+
+#[allow(deprecated)]
+pub fn create_node<S1, S2>(id: S1, cluster_name: S2) -> Node
+where
+    S1: Into<String>,
+    S2: Into<String>,
+{
+    Node {
+        id: id.into(),
+        cluster: cluster_name.into(),
+        metadata: None,
+        locality: None,
+        user_agent_name: String::new(),
+        extensions: vec![],
+        client_features: vec![],
+        listening_addresses: vec![],
+        user_agent_version_type: None,
+        dynamic_parameters: std::collections::HashMap::new(),
+    }
+}
+
+pub fn create_endpoints(endpoint_addrs: Vec<Address>) -> LocalityLbEndpoints {
+    let lb_endpoints = endpoint_addrs.into_iter().map(create_endpoint).collect();
+    LocalityLbEndpoints {
+        priority: 0,
+        // load_balancing_weight: Some(UInt32Value { value: 2 }),
+        lb_endpoints,
+        ..Default::default()
+    }
+}
+
+pub fn create_endpoint(addr: Address) -> LbEndpoint {
+    // let health_check_config =
+    //     HealthCheckConfig { address: Some(addr.clone()), disable_active_health_check: true, ..Default::default() };
+
+    LbEndpoint {
+        host_identifier: Some(HostIdentifier::Endpoint(Endpoint {
+            address: Some(addr),
+            // health_check_config: Some(health_check_config),
+            ..Default::default()
+        })),
+        load_balancing_weight: Some(UInt32Value { value: 1 }),
+        ..Default::default()
+    }
+}
+
+pub fn create_addresses(addr: SocketAddr, count: u32) -> Vec<Address> {
+    let socket = SocketConverter::from(addr);
+    (0..count)
+        .map(|i| {
+            let mut socket = socket.clone();
+            if let orion_data_plane_api::envoy_data_plane_api::envoy::config::core::v3::address::Address::SocketAddress(ref mut socket) =
+                socket
+            {
+                if let Some(PortSpecifier::PortValue(port)) = socket.port_specifier {
+                    socket.port_specifier = Some(PortSpecifier::PortValue(i + port));
+                }
+            }
+            Address { address: Some(socket) }
+        })
+        .collect()
+}
+
+pub fn create_cluster_with_endpoints(
+    name: &str,
+    endpoint_addr: SocketAddr,
+    endpoints: u32,
+    enable_http2: bool,
+) -> Cluster {
+    let addresses = create_addresses(endpoint_addr, endpoints);
+    create_cluster(name, vec![create_endpoints(addresses)], enable_http2)
+}
+
+pub fn create_cluster_load_assignment(name: &str, endpoint_addr: SocketAddr, endpoints: u32) -> ClusterLoadAssignment {
+    let addresses = create_addresses(endpoint_addr, endpoints);
+    let endpoints = vec![create_endpoints(addresses)];
+    ClusterLoadAssignment { cluster_name: name.to_owned(), endpoints, ..Default::default() }
+}
+
+pub fn create_get_header_matcher(name: &str) -> HeaderMatcher {
+    create_header_matcher(name, "GET".to_owned())
+}
+
+pub fn create_post_header_matcher(name: &str) -> HeaderMatcher {
+    create_header_matcher(name, "POST".to_owned())
+}
+
+pub fn create_header_matcher(name: &str, method: String) -> HeaderMatcher {
+    HeaderMatcher {
+        name: format!("{name}-header_matcher"),
+        header_match_specifier: Some(HeaderMatchSpecifier::ExactMatch(method)),
+        ..Default::default()
+    }
+}
+
+pub fn create_route_match(name: &str, with_prefix: String) -> RouteMatch {
+    let name = format!("{name}-route_match");
+    RouteMatch {
+        headers: vec![create_get_header_matcher(&name), create_post_header_matcher(&name)],
+        path_specifier: Some(PathSpecifier::Prefix(with_prefix)),
+        ..Default::default()
+    }
+}
+
+pub fn create_route(name: &str, with_prefix: String, with_cluster: String) -> Route {
+    let name = format!("{name}-route");
+    Route {
+        r#match: Some(create_route_match(&name, with_prefix)),
+        name,
+        action: Some(Action::Route(RouteAction {
+            cluster_specifier: Some(ClusterSpecifier::Cluster(with_cluster)),
+            ..Default::default()
+        })),
+        ..Default::default()
+    }
+}
+
+pub fn create_virtual_host(name: &str, domains: Vec<String>, with_prefix: String, with_cluster: String) -> VirtualHost {
+    let name = format!("{name}-vc");
+    VirtualHost { routes: vec![create_route(&name, with_prefix, with_cluster)], name, domains, ..Default::default() }
+}
+
+pub fn create_route_resource(
+    name: &str,
+    domains: Vec<String>,
+    with_prefix: String,
+    with_cluster: String,
+) -> RouteConfiguration {
+    RouteConfiguration {
+        name: name.to_owned(),
+        virtual_hosts: vec![create_virtual_host(name, domains, with_prefix, with_cluster)],
+        ..Default::default()
+    }
+}
+
+pub fn create_secret(name: &str, secret: secret::Type) -> Secret {
+    Secret { name: name.to_owned(), r#type: Some(secret) }
+}
+
+pub fn create_cluster(name: &str, endpoints: Vec<LocalityLbEndpoints>, enable_http2: bool) -> Cluster {
+    let load_assignment = ClusterLoadAssignment { cluster_name: name.to_owned(), endpoints, ..Default::default() };
+
+    let config = if enable_http2 {
+        let http2_protocol_options =
+            Http2ProtocolOptions { max_concurrent_streams: Some(UInt32Value { value: 10 }), ..Default::default() };
+        ExplicitHttpConfig { protocol_config: Some(ProtocolConfig::Http2ProtocolOptions(http2_protocol_options)) }
+    } else {
+        let http1_protocol_options = Http1ProtocolOptions::default();
+        ExplicitHttpConfig { protocol_config: Some(ProtocolConfig::HttpProtocolOptions(http1_protocol_options)) }
+    };
+
+    let options = HttpProtocolOptions {
+        upstream_protocol_options: Some(UpstreamProtocolOptions::ExplicitHttpConfig(config)),
+        common_http_protocol_options: Some(ConfigHttpProtocolOptions::default()),
+        ..Default::default()
+    };
+
+    let extension = Any {
+        type_url: "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions".to_owned(),
+        value: options.encode_to_vec(),
+    };
+
+    let mut extensions = std::collections::HashMap::new();
+    extensions.insert("envoy.extensions.upstreams.http.v3.HttpProtocolOptions".to_owned(), extension);
+
+    Cluster {
+        name: name.to_owned(),
+        cluster_discovery_type: Some(ClusterDiscoveryType::Type(DiscoveryType::Static.into())),
+        lb_policy: LbPolicy::RoundRobin.into(),
+        load_assignment: Some(load_assignment),
+        typed_extension_protocol_options: extensions,
+        ..Default::default()
+    }
+}
+
+pub fn create_cluster_resource(cluster: &Cluster) -> Resource {
+    let value = cluster.encode_to_vec();
+    let any = Any { type_url: TypeUrl::Cluster.to_string(), value };
+
+    let mut cluster_resource = Resource { ..Default::default() };
+    cluster_resource.name.clone_from(&cluster.name);
+    cluster_resource.resource = Some(any);
+    cluster_resource
+}
+
+pub fn create_load_assignment_resource(name: &str, load_assignment: &ClusterLoadAssignment) -> Resource {
+    let value = load_assignment.encode_to_vec();
+    let any = Any { type_url: TypeUrl::ClusterLoadAssignment.to_string(), value };
+
+    let mut cla_resource = Resource { ..Default::default() };
+    cla_resource.name = name.to_owned();
+    cla_resource.resource = Some(any);
+    cla_resource
+}
+
+pub fn create_route_configuration_resource(name: &str, route_configuration: &RouteConfiguration) -> Resource {
+    let value = route_configuration.encode_to_vec();
+    let any = Any { type_url: TypeUrl::RouteConfiguration.to_string(), value };
+
+    let mut route_resource = Resource { ..Default::default() };
+    route_resource.name = name.to_owned();
+    route_resource.resource = Some(any);
+    route_resource
+}
+
+pub fn create_secret_resource(name: &str, secret: &Secret) -> Resource {
+    let value = secret.encode_to_vec();
+    let any = Any { type_url: TypeUrl::Secret.to_string(), value };
+
+    let mut secret_resource = Resource { ..Default::default() };
+    secret_resource.name = name.to_owned();
+    secret_resource.resource = Some(any);
+    secret_resource
+}
+
+pub fn create_listener(
+    name: &str,
+    addr: SocketAddr,
+    codec_type: CodecType,
+    domains: Vec<String>,
+    mut cluster_names: Vec<(String, u32)>,
+) -> Listener {
+    let route_match = RouteMatch { path_specifier: Some(PathSpecifier::Path("/".to_owned())), ..Default::default() };
+
+    let cluster_action = if cluster_names.len() == 1 {
+        let cluster_name = cluster_names.remove(0).0;
+        RouteAction { cluster_specifier: Some(ClusterSpecifier::Cluster(cluster_name)), ..Default::default() }
+    } else {
+        let clusters: Vec<_> = cluster_names
+            .into_iter()
+            .map(|(name, weight)| ClusterWeight {
+                name,
+                weight: Some(UInt32Value { value: weight }),
+                ..Default::default()
+            })
+            .collect();
+
+        RouteAction {
+            cluster_specifier: Some(ClusterSpecifier::WeightedClusters(WeightedCluster {
+                clusters,
+                ..Default::default()
+            })),
+            ..Default::default()
+        }
+    };
+
+    let vc_name = format!("{name}-vc");
+    let virtual_host_route = Route {
+        name: format!("{vc_name}-route"),
+        r#match: Some(route_match),
+        action: Some(Action::Route(cluster_action)),
+        ..Default::default()
+    };
+
+    let virtual_host = VirtualHost { name: vc_name, domains, routes: vec![virtual_host_route], ..Default::default() };
+
+    let http_connection_manager = HttpConnectionManager {
+        codec_type: codec_type.into(),
+        route_specifier: Some(RouteSpecifier::RouteConfig(RouteConfiguration {
+            name: format!("{name}-route-conf"),
+            virtual_hosts: vec![virtual_host],
+            ..Default::default()
+        })),
+        ..Default::default()
+    };
+
+    let http_connection_manager_any = Any {
+        type_url:
+            "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
+                .to_owned(),
+        value: http_connection_manager.encode_to_vec(),
+    };
+
+    let http_connection_manager_filter = Filter {
+        name: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
+            .to_owned(),
+        config_type: Some(ConfigType::TypedConfig(http_connection_manager_any)),
+    };
+    let filter_chain =
+        FilterChain { name: format!("{name}-fc"), filters: vec![http_connection_manager_filter], ..Default::default() };
+
+    Listener {
+        name: name.to_owned(),
+        address: Some(Address { address: Some(SocketConverter::from(addr)) }),
+        filter_chains: vec![filter_chain],
+        ..Default::default()
+    }
+}
+
+pub fn create_listener_resource(listener: &Listener) -> Resource {
+    let value = listener.encode_to_vec();
+    let any = Any { type_url: TypeUrl::Listener.to_string(), value };
+
+    let mut listener_resource = Resource { ..Default::default() };
+    listener_resource.name.clone_from(&listener.name);
+    listener_resource.resource = Some(any);
+    listener_resource
+}
+
+pub fn dicovery_request_stream() -> impl Stream<Item = DiscoveryRequest> {
+    tokio_stream::iter(1..usize::MAX).throttle(std::time::Duration::from_secs(5)).map(|i| DiscoveryRequest {
+        version_info: "v3".to_owned(),
+        node: None,
+        resource_names: vec![],
+        resource_locators: vec![],
+        type_url: TypeUrl::Cluster.to_string(),
+        response_nonce: format!("nonce {i}"),
+        error_detail: None,
+    })
+}
+
+pub fn delta_dicovery_request_stream() -> impl Stream<Item = DeltaDiscoveryRequest> {
+    tokio_stream::iter(1..usize::MAX).throttle(std::time::Duration::from_secs(5)).map(|i| DeltaDiscoveryRequest {
+        node: None,
+        type_url: TypeUrl::Cluster.to_string(),
+        response_nonce: format!("nonce {i}"),
+        error_detail: None,
+        resource_names_subscribe: vec![],
+        resource_names_unsubscribe: vec![],
+        resource_locators_subscribe: vec![],
+        resource_locators_unsubscribe: vec![],
+        initial_resource_versions: std::collections::HashMap::new(),
+    })
+}
+
+#[cfg(test)]
+mod test {
+    use orion_data_plane_api::decode::from_yaml;
+    use orion_data_plane_api::envoy_data_plane_api::{
+        envoy::{config::cluster::v3::Cluster, service::discovery::v3::Resource},
+        google::protobuf::Any,
+        prost::{self, Message},
+    };
+    use tracing::info;
+
+    use crate::xds::model::{TypeUrl, XdsResourcePayload};
+
+    #[test]
+    fn test_cluster_conversion() {
+        const CLUSTER: &str = r#"
+name: cluster1
+type: STATIC
+load_assignment:
+  endpoints:
+  - lb_endpoints:
+    - endpoint:
+        address:
+          socket_address:
+            address: 192.168.2.10
+            port_value: 80
+"#;
+        let cluster: Cluster = from_yaml(CLUSTER).unwrap();
+
+        let mut value: Vec<u8> = vec![];
+        cluster.encode(&mut value).unwrap();
+        let any = Any { type_url: TypeUrl::Cluster.to_string(), value };
+
+        let resource = Resource { name: "cluster__111".to_owned(), resource: Some(any), ..Default::default() };
+        let mut buf: Vec<u8> = vec![];
+        cluster.encode(&mut buf).unwrap();
+        let prost_buf = prost::bytes::Bytes::from(buf);
+        let decoded = Cluster::decode(prost_buf).unwrap();
+        info!("decoded {decoded:?}");
+        XdsResourcePayload::try_from((resource, TypeUrl::Cluster)).unwrap();
+    }
+}
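These helpers compose with the server below; a sketch of publishing a freshly built cluster through the delta channel (`delta_resource_tx` is assumed to be the sender half of the channel handed to start_aggregate_server):

    let upstream: std::net::SocketAddr = "127.0.0.1:4000".parse()?;
    let cluster = create_cluster_with_endpoints("cluster1", upstream, 2, false);
    let resource = create_cluster_resource(&cluster);
    delta_resource_tx.send(ServerAction::Add(resource)).await?;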
diff --git a/orion-xds/src/xds/server.rs b/orion-xds/src/xds/server.rs
new file mode 100644
index 00000000..17c71690
--- /dev/null
+++ b/orion-xds/src/xds/server.rs
@@ -0,0 +1,211 @@
+// SPDX-FileCopyrightText: © 2025 Huawei Cloud Computing Technologies Co., Ltd
+// SPDX-License-Identifier: Apache-2.0
+//
+// Copyright 2025 Huawei Cloud Computing Technologies Co., Ltd
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//
+
+use std::{net::SocketAddr, pin::Pin};
+
+use atomic_take::AtomicTake;
+use orion_data_plane_api::envoy_data_plane_api::envoy::service::discovery::v3::{
+    aggregated_discovery_service_server::{AggregatedDiscoveryService, AggregatedDiscoveryServiceServer},
+    DeltaDiscoveryRequest, DeltaDiscoveryResponse, DiscoveryRequest, DiscoveryResponse, Resource, ResourceName,
+};
+use orion_data_plane_api::envoy_data_plane_api::tonic::{
+    self, transport::Server, IntoStreamingRequest, Response, Status,
+};
+use tokio::sync::mpsc::{self, Receiver};
+use tokio_stream::{wrappers::ReceiverStream, Stream, StreamExt};
+use tracing::info;
+
+use crate::xds::{self, model::XdsError};
+
+pub enum ServerAction {
+    Add(Resource),
+    Remove(Resource),
+}
+
+pub type ResourceAction = ServerAction;
+
+#[derive(Debug)]
+pub struct AggregateServer {
+    delta_resources_rx: AtomicTake<Receiver<ResourceAction>>,
+    stream_resources_rx: AtomicTake<Receiver<ResourceAction>>,
+}
+
+impl AggregateServer {
+    pub fn new(delta_resources_rx: Receiver<ResourceAction>, stream_resources_rx: Receiver<ResourceAction>) -> Self {
+        Self {
+            delta_resources_rx: AtomicTake::new(delta_resources_rx),
+            stream_resources_rx: AtomicTake::new(stream_resources_rx),
+        }
+    }
+}
+
+type AggregatedDiscoveryServiceResult<T> = std::result::Result<Response<T>, Status>;
+
+#[tonic::async_trait]
+impl AggregatedDiscoveryService for AggregateServer {
+    type StreamAggregatedResourcesStream =
+        Pin<Box<dyn Stream<Item = Result<DiscoveryResponse, Status>> + Send>>;
+
+    async fn stream_aggregated_resources(
+        &self,
+        req: tonic::Request<tonic::Streaming<DiscoveryRequest>>,
+    ) -> AggregatedDiscoveryServiceResult<Self::StreamAggregatedResourcesStream> {
+        info!("AggregateServer::stream_aggregated_resources");
+        info!("\tclient connected from: {:?}", req.remote_addr());
+
+        let (tx, rx) = mpsc::channel(128);
+        let mut resources_rx =
+            self.stream_resources_rx.take().ok_or(Status::internal("Resource stream is unavailable"))?;
+        tokio::spawn(async move {
+            while let Some(action) = resources_rx.recv().await {
+                let item = match action {
+                    xds::server::ServerAction::Add(resource) => {
+                        let Some(resource) = resource.resource else {
+                            continue;
+                        };
+                        DiscoveryResponse {
+                            type_url: resource.type_url.clone(),
+                            resources: vec![resource],
+                            nonce: uuid::Uuid::new_v4().to_string(),
+                            ..Default::default()
+                        }
+                    },
+                    xds::server::ServerAction::Remove(resource) => {
+                        let Some(resource) = resource.resource else {
+                            continue;
+                        };
+                        DiscoveryResponse {
+                            type_url: resource.type_url,
+                            nonce: uuid::Uuid::new_v4().to_string(),
+                            ..Default::default()
+                        }
+                    },
+                };
+
+                match tx.send(std::result::Result::<_, Status>::Ok(item)).await {
+                    Ok(()) => {
+                        // item (server response) was queued to be sent to the client
+                    },
+                    Err(_item) => {
+                        // output_stream was built from rx and both are dropped
+                        break;
+                    },
+                }
+            }
+            info!("\tclient disconnected");
+        });
+
+        let mut incoming_stream = req.into_streaming_request().into_inner();
+        tokio::spawn(async move {
+            while let Some(item) = incoming_stream.next().await {
+                info!("Server: got item {item:?}");
+            }
+            info!("Server side closed");
+        });
+
+        let output_stream = ReceiverStream::new(rx);
+        Ok(Response::new(Box::pin(output_stream) as Self::StreamAggregatedResourcesStream))
+    }
+
+    type DeltaAggregatedResourcesStream =
+        Pin<Box<dyn Stream<Item = Result<DeltaDiscoveryResponse, Status>> + Send>>;
+
+    async fn delta_aggregated_resources(
+        &self,
+        req: tonic::Request<tonic::Streaming<DeltaDiscoveryRequest>>,
+    ) -> AggregatedDiscoveryServiceResult<Self::DeltaAggregatedResourcesStream> {
+        info!("AggregateServer::delta_aggregated_resources");
+        info!("\tclient connected from: {:?}", req.remote_addr());
+
+        // spawn and channel are required if you want to handle the "disconnect" functionality
+        // the `out_stream` will not be polled after the client disconnects
+        let (tx, rx) = mpsc::channel(128);
+        let mut resources_rx = self.delta_resources_rx.take().ok_or(Status::internal("Delta stream is unavailable"))?;
+        tokio::spawn(async move {
+            while let Some(action) = resources_rx.recv().await {
+                let item = match action {
+                    xds::server::ServerAction::Add(r) => {
+                        let Some(ref resource) = r.resource else {
+                            continue;
+                        };
+                        DeltaDiscoveryResponse {
+                            type_url: resource.type_url.clone(),
+                            resources: vec![r],
+                            nonce: uuid::Uuid::new_v4().to_string(),
+                            system_version_info: "system_version_info".to_owned(),
+                            ..Default::default()
+                        }
+                    },
+                    xds::server::ServerAction::Remove(r) => {
+                        let Some(resource) = r.resource else {
+                            continue;
+                        };
+                        DeltaDiscoveryResponse {
+                            type_url: resource.type_url.clone(),
+                            nonce: uuid::Uuid::new_v4().to_string(),
+                            system_version_info: "system_version_info".to_owned(),
+                            removed_resource_names: vec![ResourceName {
+                                name: r.name.clone(),
+                                dynamic_parameter_constraints: None,
+                            }],
+                            removed_resources: vec![r.name],
+                            ..Default::default()
+                        }
+                    },
+                };
+
+                match tx.send(std::result::Result::<_, Status>::Ok(item)).await {
+                    Ok(()) => {
+                        // item (server response) was queued to be sent to the client
+                    },
+                    Err(_item) => {
+                        // output_stream was built from rx and both are dropped
+                        break;
+                    },
+                }
+            }
+            info!("\tclient disconnected");
+        });
+
+        let mut incoming_stream = req.into_streaming_request().into_inner();
+        tokio::spawn(async move {
+            while let Some(Ok(item)) = incoming_stream.next().await {
+                info!("Server: got item {item:?}");
+            }
+            info!("Server side closed");
+        });
+
+        let output_stream = ReceiverStream::new(rx);
+        Ok(Response::new(Box::pin(output_stream) as Self::DeltaAggregatedResourcesStream))
+    }
+}
+
+pub async fn start_aggregate_server(
+    addr: SocketAddr,
+    delta_resources_rx: Receiver<ResourceAction>,
+    stream_resources_rx: Receiver<ResourceAction>,
+) -> Result<(), XdsError> {
+    info!("Server started {addr:?}");
+    let server = AggregateServer::new(delta_resources_rx, stream_resources_rx);
+    let aggregate_server = AggregatedDiscoveryServiceServer::new(server);
+    let server =
+        Server::builder().concurrency_limit_per_connection(256).add_service(aggregate_server).serve(addr).await;
+    info!("Server exited {server:?}");
+    Ok(())
+}
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 00000000..be7ec811
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,8 @@
+use_small_heuristics = "MAX"
+# Add a trailing comma after match arms that use blocks.
+match_block_trailing_comma = true +newline_style = "Auto" +# Do not merge multiple Derive macros into the same line +merge_derives = false +# Set line length +max_width = 120 diff --git a/test_certs/beefcakeCA-gathered/beefcake-athlone.cert.pem b/test_certs/beefcakeCA-gathered/beefcake-athlone.cert.pem new file mode 100644 index 00000000..87184171 --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake-athlone.cert.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFPTCCAyWgAwIBAgICIAEwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgQmVlZkNha2UxETAP +BgNVBAsMCGJlZWZjYWtlMR4wHAYDVQQDDBViZWVmY2FrZSBJbnRlcm1lZGlhdGUw +HhcNMjQwNDExMTMzNDU0WhcNMjUwNDIxMTMzNDU0WjAbMRkwFwYDVQQDDBBiZWVm +Y2FrZS1hdGhsb25lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrL3 +IIGV6UtGRVG7E2k0tnVw3fv+mMpAKwL8xldkmuu1ecv0rmtZrO/nMwIrhOuNzVqi +eZFPZWWPNK4hj1fKf+joC3xjMo+sNPvz1lPW7g04zrSeqrhgIPD1Se19LVHjOVK+ +z0PhEYHVxn/UrPD/Us+B2qg4PAi4A9TRYGczGp06TIudPG1Rd6BS9/4hgHJI5NbF +tYuzNc+eVjuH/5pVZsFU5a8soH1L3YyVC6nszvkwJ2V2/EUAPKjkLdf5yqySVWZa +wBvrtasRCqWjQy3jLlILkSmmgQ3dsXAZvtIWuyPBHgLxj6xt55lFUQ4Us+rjXDwR +k8+QEVhRgaEOdyRKewIDAQABo4IBNzCCATMwCQYDVR0TBAIwADAdBgNVHQ4EFgQU +0Q82dKP8HFPBxMcH/cuaftlEGXYwgZwGA1UdIwSBlDCBkYAUycazFgiBcRE2SSL7 +T0Ouo+bgWcuhdaRzMHExCzAJBgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjEP +MA0GA1UEBwwGRHVibGluMRgwFgYDVQQKDA9UZW5hbnQgQmVlZkNha2UxETAPBgNV +BAsMCGJlZWZjYWtlMREwDwYDVQQDDAhiZWVmY2FrZYICEAAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA5BgNVHREEMjAwghZh +dGhsb25lXzEuYmVlZmNha2UuY29tghZhdGhsb25lXzIuYmVlZmNha2UuY29tMA0G +CSqGSIb3DQEBCwUAA4ICAQBuSyNPleF9SYifUFmkIae/BYZLDwu9CCY9kh3msLJ0 +FSlmOa8WnFTt5aA8ZmoFrNdInd4Y9kgfAGO6/6NboISgW3V7aUX2jeHZj400Fu0N +TI/gbMMxidl9Y9+cailRxStBrgAf3DwgAS7ew+UmSp4n7hFeSG133h1+sOj4dFlv +3ofDJ+mXOKgtceSxvPI7Sb9bXWEBc/iUds8OZ7zyZBssrcvKoKtpNNmB4fYr8cFL +b1Ouwnrpm/9aZUuu4swtSxIuplrbWUpX+krTECXNtA2LFhxC/0XUuMuZMRo1+GwD +GJ41+/XByXM2oWU4I3As1qlqrqw3ZAkngDBYkwFrjaRfgqX+zwbtf62WpJLq+79k +VW8KzOL5cQKWQIYjlOM0S0zhSAY/izh3dgs2qZgUkd3e8IhLoYn2TijNMdLZuq9R +pcsXilBJWELFPzYz9DnwJzuEsrG1pYIfWYcO9ZMM2YYMpfhhRP7XqGj46M8sOaK/ +lOar83itTcy8bwtayISVYeL26m0QG1rPvjcnMXNSB+wRY6BOgn/KVm6l4/qIBUtt +mmtfINEamP5R5oOh4dydrt8LluPNY48dqiSYyuy4AjxEuCxp9s7rfDzzpZu+kUR8 +wraEzyH16K9K9GHyJGpwa5AZIqXDx2TcqURDYU+K9b7rhDwNlrpGlcvDWDxTLGUA +6w== +-----END CERTIFICATE----- diff --git a/test_certs/beefcakeCA-gathered/beefcake-athlone.key.pem b/test_certs/beefcakeCA-gathered/beefcake-athlone.key.pem new file mode 100644 index 00000000..4f169d43 --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake-athlone.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCusvcggZXpS0ZF +UbsTaTS2dXDd+/6YykArAvzGV2Sa67V5y/Sua1ms7+czAiuE643NWqJ5kU9lZY80 +riGPV8p/6OgLfGMyj6w0+/PWU9buDTjOtJ6quGAg8PVJ7X0tUeM5Ur7PQ+ERgdXG +f9Ss8P9Sz4HaqDg8CLgD1NFgZzManTpMi508bVF3oFL3/iGAckjk1sW1i7M1z55W +O4f/mlVmwVTlryygfUvdjJULqezO+TAnZXb8RQA8qOQt1/nKrJJVZlrAG+u1qxEK +paNDLeMuUguRKaaBDd2xcBm+0ha7I8EeAvGPrG3nmUVRDhSz6uNcPBGTz5ARWFGB +oQ53JEp7AgMBAAECggEADQDmPdsCG8Tjcfp07dYeuywVCXjzM6VDpHqgR7/ZOGIu +IRGU4Ly92+Ou4RBFdACNOV6zNFBOyXHcoyOzp3X17T0atJbUKBR4ptL64euzVlx7 +64BB79wxz5HlTX+UxIr8BQLLOtCceqSgPcUQbApePug84VffI7IpxviQLdhQqKeG +0nxok+YTGKJjRg64XkazzXIeqYGSObC+8byvNtyt2DTX7V1QTKAP16MDKGDZy8lI +NvDmm3zVSSApVlxvnvmqu0NmWYYOaqDz/TXA8YHLISffOk5FoNx/gamcyQVVmzeC +zXIRk77sSyXRJ/C8S6iAje8KrXfBAQebs6zBMefsmQKBgQDh4GesLuLb5JDnvSyd +OYKqjSMVIPCi3jgOhjr1T0D+xkdOsq7hYe4hwN0m4Flq92oEDaqmupwiw0wehccR 
+wuduOVDVmPIsaXn4xax/CKqoZBclTnZ6Oi3BxJC+B7yqSSRyymZKpbidK02mc1Ta +hduhJzDgwG6LQc26fZidRSscqQKBgQDF/1KH19jc3iz14aYM/ABdJcFGrjh4a4SZ +46qQGzFJSSjgV6dVeOEPK9m68Ux9/sG8SGmJ9VUafdNrccMvsdjn65Y6HwQs9a/7 +cPj+V71VfJ9pPPZU6iXKT/soQLYtQhqy54VwDp4W3GOa2wAWYKRo1by/JcPzkLfr +XEc7JrOggwKBgQCejTnYqFTAC9IXiTfu6TaqZ0XQB/5CJQBuWZS7KzWxYRcey9hs +BqT0doAGUQcTSbbWP42hWW5ODI67Aia5dfmj6oGuXZzoxeVDVeZ+iWPZreB9ZHVP +2NTTphn/+khZs9vs49wn2+oaERKBm4VdmwaPOYZazVkLUVaK6KyfALWBeQKBgQCj ++R5dV/nGeM1dWGHPUQfwHGtjYLdtqwVm+O/xZkO8ALW0jYMMOw6QJO2tb1a85bWG +e6XD1gSD6v6jddSC9FPBRg7k1Xj8zv/OSbI72Wf4mzvdyNnsWvmvAevrWyREeQFp +dmnA8qokHqLyxTneTmz6UUB+QQV1+j0G0yY+q3+JhwKBgFVt5iz9s+J06a8kU4IQ +F4YKK5liJsU2PTt/a5IjpukUAZVaIorsA95uC+cccIuIVryCmbT3fA4mDyY393oX +I+hl++m31+Sr0CQDxOmzFG64PAilgFACTL6hy08ng36naj+rnjqfZuG6LoT9ByJZ +Mv0LktQTDVJcrkX5OIxDeLic +-----END PRIVATE KEY----- diff --git a/test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem b/test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem new file mode 100644 index 00000000..d6bf3412 --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake-dublin.cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFOjCCAyKgAwIBAgICIAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgQmVlZkNha2UxETAP +BgNVBAsMCGJlZWZjYWtlMR4wHAYDVQQDDBViZWVmY2FrZSBJbnRlcm1lZGlhdGUw +HhcNMjQwNDExMTMzNDUzWhcNMjUwNDIxMTMzNDUzWjAaMRgwFgYDVQQDDA9iZWVm +Y2FrZS1kdWJsaW4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCh0v/d +4F4cjr+AXq6/hpT/E9UtS34lubi/37JaN70l1ViTQc+7OgdmFw+E0l9ky2ue7+dH +bRFfRMSr31PAUYmvU279h6Nc/KRVTxnUbKE9dj+nwiVqnw/1xQjc3XYZz7JRcTZN +iBexq9OKHFBCo3lscZ5k8XQLTV12FCXaVdrZ0hRZ1VJszCn4yIT+NEjmt1Fkm6Ew +EqDboG3FGHiuOGrWewlE46OmwgQ/Rd/QLK5A+7HtyIJ1GJQspCSmQdj077VkIzQ/ +nBzHqwlcSNvstqslTuE9hZ9epl6UIarUu3pCWoBALGUnuf79InBuyKKcFSW1fcM1 +fo0sEkMTvpHJ0LErAgMBAAGjggE1MIIBMTAJBgNVHRMEAjAAMB0GA1UdDgQWBBS2 +tT6prz27t7l/lD76s7I1EVYu2jCBnAYDVR0jBIGUMIGRgBTJxrMWCIFxETZJIvtP +Q66j5uBZy6F1pHMwcTELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8w +DQYDVQQHDAZEdWJsaW4xGDAWBgNVBAoMD1RlbmFudCBCZWVmQ2FrZTERMA8GA1UE +CwwIYmVlZmNha2UxETAPBgNVBAMMCGJlZWZjYWtlggIQADAOBgNVHQ8BAf8EBAMC +BeAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDcGA1UdEQQwMC6CFWR1 +Ymxpbl8xLmJlZWZjYWtlLmNvbYIVZHVibGluXzIuYmVlZmNha2UuY29tMA0GCSqG +SIb3DQEBCwUAA4ICAQBGnRhm/uN1pNPnmRLDU3z8mXBtXSsmj2ghnIJzcmNBXzom +Y2WNfQx52Td6K673B4E3Cke1zF9JWxk/7qFwJmmiHOv28GpwHtpAZ1UpOkyc7J1z +Qy5Zs1KlUbC6CYIcN8J9DDPU899ls1cpmdbLz4soHN2lF7IZEycK5JF/iuTwDKXY +1sDcdHLkxf4epOoxP1dwhQCKzEk6hwrVaTzrVUh2u7VxP+nLWUPhC0UL3DYrrOG5 +QX9yJjgB0cARRvXSy55Rojk5MvG5C6jgVfUA+zdOTFaBS/QfIPFu9IVcmkQnT4KG +WYXAmuAmwLzF0yNZEaR3A32O8yiuvve5aHPVU37+b0CGpju8Lol+oAyp8XPfCBzS +ATAq+gbUePKzXPZOMe2rng4FcL+nvmYLpV9jofbka+EnwIGeHyLHzwstcDninq6R +usf+hHoAHXtnfY1PkNZtatwVwbhabIgsuY1kpRPlri2OvvpKSRwDKBqQiBDMsXHG +xxKBD3SIv9RS8JKnoHYvdLvmG++4+rrrCB1UIdEDsNBtmrhiueigiv1wtei0bYrM +Cm2jX96FTopb4tFwkyPT6fT8eg9LA0wd0+Gte8r+6OC+By/RALefssfRkOpcTpO7 +SMjREzvxN5J6C6Qg2QfJdT08+kyL+jslo1lHBLQimbbeBX9TM1YXEenuIOPpWQ== +-----END CERTIFICATE----- diff --git a/test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem b/test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem new file mode 100644 index 00000000..3f9ab22d --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake-dublin.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQCh0v/d4F4cjr+A +Xq6/hpT/E9UtS34lubi/37JaN70l1ViTQc+7OgdmFw+E0l9ky2ue7+dHbRFfRMSr +31PAUYmvU279h6Nc/KRVTxnUbKE9dj+nwiVqnw/1xQjc3XYZz7JRcTZNiBexq9OK 
+HFBCo3lscZ5k8XQLTV12FCXaVdrZ0hRZ1VJszCn4yIT+NEjmt1Fkm6EwEqDboG3F +GHiuOGrWewlE46OmwgQ/Rd/QLK5A+7HtyIJ1GJQspCSmQdj077VkIzQ/nBzHqwlc +SNvstqslTuE9hZ9epl6UIarUu3pCWoBALGUnuf79InBuyKKcFSW1fcM1fo0sEkMT +vpHJ0LErAgMBAAECgf8kjrDpmZupLUEV6aM7TAFc8/Nfctk3Vz2QGyOvkz1SeWAJ +Oao0Ib000wDZK1gMQpLi1Po//ePEIe+hjvzcoDmlN16w58jVY5MRsqJN2uc80KSQ +wXYHyudQHl23h6WmhG/u9iR5Chr63avBsk5vaimEK3wc/G8QHpTT7WP1GcWBxMIA +pgm80oV2rYh7iEi2ojJeaO2fP+h+AqgRi7+p16d9nwQB1J46lsK+aLBq1q7UnWvv +DA7YHprpuKhcgNVMOeLqlvy5+JIDM8eE7+9+IDki2A2I3HhcXgcRcy51fmR+gKoK +x/wCVe/1KPLv+y7nJnsRIfdu+Ky2PI47huC1kLUCgYEAttAJUjH2PB1FfS2iKPS/ +xGM0JBOsiMwZQahEnalAo7hEhq1X4S6JuuAdEGVk1Btdie2aKjl026ku2iQcEh1c +q0a2I0Hjv0PAhNdd22SnKDVk2PnkLCGDQsnxsFmsXatx9I4F9E15sEmZmCB9ke13 +0A5sWIZ5Bk/4jcjkcpGE2p0CgYEA4pvqzpEIbPXtSAbf4lyRZqHDIXz+rMDy9qUn +SvgF3UBn6U8wdjfbYD6vYMkXFeTaRghvTaXT5hw0DmJzHgo3exka3WyRbRTIw7kT +x0wH6onOov3srgOTAMdV+WiT05sPruj8RYP+JZBt9h9i4IDfgm5/MZV5nOs/oAjT +u1Dk7GcCgYAtOyXDP3dUAk4KHbtMdNnppj3lWzTKF+170D1B4PmMSHAz/jmLDm+n +XAH055Fd5XNsR+xn3BVsr9xWvq5vUPJ3siQobwoBbar5cyvWt32gNi3AfXuV66jF +Yp+46Ub7Wgr03hrdCjo8gslM2WUYMO/++0KVWJgjFrBBrF3Rdu2vBQKBgQDiDWdO +aJ9fEnSBRVpRFMZ2+ycMYQQskM3N/TIVNxWigH0n4DcXtuDAQcfxZKgCzyhudDuT +2bhn8/p53vu3G7n8si56c7MefX1IwXdZeB7teEsJjQRtEmOygybghdHxZN1gPkpp +jiIYltsainS4P9a25kM700BdqM7vdd7BHRFAQQKBgGXeJdzEaRzUupqCYE1D+dRR +Iz3Qz1D/MhyiTktUDInGDRaJgfx1/4d/wn/KccxhO+DRmRfjHkdyQTkdZLqqDaaH +7pvBmBb7xsomajgITFeT6xPPULikWhPDIfPRzDcYV9jPhN+5vt5oEPGo6pRJFtz/ +zCT7AIf2RzwReze0lOAv +-----END PRIVATE KEY----- diff --git a/test_certs/beefcakeCA-gathered/beefcake.ca.key.pem b/test_certs/beefcakeCA-gathered/beefcake.ca.key.pem new file mode 100644 index 00000000..f2ab9629 --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake.ca.key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQC1+wklj3CXHX56 +h84mZlk4nI/FGBZRbaB15Y+3ZjIClF5OFiT2L1676bcOXVKCVovFfW0rA/w4Mpt/ +4xWvPVgwzlc+ga1WFu+dI3MWdqWujh1TuqUEZLDw9Uy0JAYyIvJWrkT7ap8TFIgI +CugvtsyMnwwMduPT568Ra/MYXfBEi0nYuymd2sQptwGmXQKWm97MTr++f2Rp5+TY +9BXH1P/TCSXmm0JyMI2VUhAZYXhwDOqZeix81TjI2fWWeCU8xtis2mfg6BH/uY/G +M2xCmOoZLZZAbbFEXKEV08pLXKaLqowRJfSW8cUTybnkJfEjSZxYJbw9rnDBxxGP +ouJ/tUqXxcJrd9K1umaiYbQogtC6lO1aiZJ+U/THqX6OAnmItVVkaXBudp3RSZ1o +4YAr3PoZBk7lJ45ASlYHQ1KW/EnTYX/ygxRRT0SkNnJciwYngds5YZrkQq9Q7+Mq +6hiedNEDVwOIacNzFSyeFMQPQst7prekJ0pJEHrZ1/K7Q7g8xEIftew0LyMV3KTZ +Y/BZ4TXRXzabJIsAEiujNoyX+8XT9kDeM/MpG05vXBv+dwv4oPVp7ejfskbASeU1 +uUX/5exGyKOpyInUqWROJla3EaXHa9J7cLiRz+dppxN3Eri09g5eYI1FFFAxlZEd +U0S1Oxf3kVOdtEtlGCb3+zDiXCmTkwIDAQABAoICAApeAzuHRW99U1+zW32d+H1B +AYKhGs6sekuT/7D/w9+YUa1WA5+dqT3SeSsfU+omZg81L52p4JnYJMyPVt/p3cAv +w218U8JegiKneH6SHmHxhLFWk7xcxCTvMktTyFhLJRd4gvpmCCrPY5/1GWYGedIn +UpLTyDzq9HL/fYKWlAH6g7oOIwBh57vYE1AhHch9TsOcDgeI89DLB1odU2adJRwm +jERZrEgbqW5ecSwQ/MEURrnBoHye91kNWbldwpS/qP5jXuYxVvQEv4vzNbqvQggJ +dT5H08npE03y40i2Yquzr77NNOSqzjmQguQle36fAV2XDndMilummlUVfy9AM8AC +38bIXQrmvC6hJj30gVynDfIxEhXk/FUPT9q7ckki5qH9kMQbpE82sT4idMS3hrnl ++hFbLN74V2wha1yp3sMlmAHp4N65nb9Lu3Q/WjL9PAAkKT0uj31ha6ntvSGucf2g +NBj8gwv6atsgdt49kVUp/XJkZH6XG/+AgrF+fh1JWuXB9+FNqHULg3vtbn/C4od9 +/vNcrG37a/SOI5NB1JIF07+3dpoUHVEm+wK3CP672dBT4PPZMpDNW23SyAW8T1Fw +RfpOW/wzQnwmvXv2HlBe24oaSh0JweI4fTIJZr+inzkp537JuUpQmpH1QJVg581C +/P5oDMUbFKEpK1ZFiko5AoIBAQC5+mZ6ATllWuNyvi3/lthzHhYqnp41nfSjy+Fp +rOFSNn3ZrtlXlYe2H7PYmkuLLWKMg34pEG2FNBUCjTgjmzNbbWZGuRebj6zzOVYg +hZ3d7j9eQphBTeO/RQmbAiF5ySbeRv4D8UTWxA0dqeOYsDb2E7V1RH0Dia1yWv5i +XpZAIgGaokBqeHdEBKpf8cMUPtzXaEtCOVvmQ/HkuhlUsOeWar2BCSXsSuMGqpmE 
+ObrqxksDuqZ77gL+mB7XLip51YTJRaWJ/R6vMd1hSQFb3ezQxAXzjJ23VMYpIQvZ +veuqNk5htzOT19r0YwaE8q7QyM+g3DRWXoVJ4GUOGw/8GCOnAoIBAQD6f1UjA2Jj +0j33KJVsQ4fZhLzGBPD2JzJLzIffjzULMcOWqzP7kv0nM8E4nhMeB9Ff2WCAtFB6 +Yx8HQignNhA+Wv0Gz35LYk82Bqs5rl2yxCnLEIDV2mwKS6O8LIS1baEjVKiGg/OC +PhD8w6Y+Bm66lvTyLgcLnln6iU8ce1oDFtfxO65y//x8cBF9EwJCESvCcIbmjhKO +GOzl3oGIt+XQStIqToOhEpDPNPVqiiTaGuQa9iUBDSsz3+ELKmFBs3NmEl3yNGq5 +D3FwoiED8CNI6NaDZX97aaWXjX3DaWHpMgwFBVUJAFVnFQ2PwooeX57HyeZroGlK +/2oGZDt1Mn41AoIBAQCQeGHu2NCbWZViZzYYXDfchTy28uz3vbJzI0gB7VhDoyz0 +Uw1nKjN4WPw+gO26OhFaEjOI9sf9N84yYz1++2tT1JIk0lfychPqBVpQfTt7UT5M +nPLll/oZdSnXDjqu/oGBHijpKx1MsJN3vaedzxWC5yCI7iEnBGaGdQUzqUKSo1gX +pX7gtp//aJhpW8ctDQBFHTGJSkGHiLR5988BqULd6CE6Ab9B5ZKidRg4TRhUlKFl +TjNSBjyxRb5/bc91QVabe8zyenzBn7zXe/zgPm9eZUgyP8OszN/6xz8LcKuHWBRf +eA9LA92FBv/jexot+jDARw8pINft4bDe2kr6jE31AoIBAQDk1joSeF8DzebpyVtn +wfFy1v4U8CKo3HnoIPrnUk4k4dtJ4zDj3A4EKBWWAWJ5hq7RHyY3rYHvrGmGGfvQ +hmUkcb7A4AECINc4BbOaSB4011tOtM7djmIwrZTwrMK3c2VHf2lNKXHlQ4oFPz65 +Xur87jbqnv20sX6Fj7aD0HQUamPvbpQZXp1rmH2Bg0IG1OTjBiVWredphH+2JySy +nz0EobWJDJzXIrzDbh/cqkKsNPC5aK+5XCxhgV528spSWPtM/e5w+02bre319Uav +8Eprqrl849YoCgVKg+mmALl/bUbpt0tNo85hEyyGbjP7N2M4JMuo+qv15YsYN6zR +jcUZAoIBAQCpry5G9HJYYliYz490hzGIp+I4USFpOBdTfJDCLWewQmAZuyCm1ZHF +9sy8CpX4Ksp/JJIMYcnQqItWjO0K9vQIeU1/WVyaprJsDCBkXaIE2lxFy+Fn5++E +EF8HzKSQevLtFQ4U1n99SsGD6M4eEW2ZfL4WP7jrLagsi40lTCibkfes48yKc7p/ +z9BKZMHD9Cc99RNE/ypTKIkgiQ69dRSd7W+4enHkVS070P0ygW2hCuxDT/LRCIZ8 +ToCr2EoGtBsab5nvuQLPKbtDjIE2BxPErCZZKT+4/i40x3hkDoFZtGJvj7L0QrQI +vVEAYRNEIcOT8tvs2Lx7wE+vUkvQe11e +-----END PRIVATE KEY----- diff --git a/test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem b/test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem new file mode 100644 index 00000000..c4083cba --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake.intermediate.ca-chain.cert.pem @@ -0,0 +1,67 @@ +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZEdWJsaW4xGDAWBgNVBAoMD1Rl +bmFudCBCZWVmQ2FrZTERMA8GA1UECwwIYmVlZmNha2UxETAPBgNVBAMMCGJlZWZj +YWtlMB4XDTI0MDQxMTEzMzQ1M1oXDTM0MDQwOTEzMzQ1M1owbTELMAkGA1UEBhMC +SUUxETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgQmVlZkNha2Ux +ETAPBgNVBAsMCGJlZWZjYWtlMR4wHAYDVQQDDBViZWVmY2FrZSBJbnRlcm1lZGlh +dGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCfXyeKVpCcBvPcVito +0ZT6NB82Tff3K8nH1WotOyPKKq4lgS/q/59WbjNIdZ1i7XL1hWBndSaFqcSS4mP/ +awIqDKRn3KnKvg8xIvrzvIAW2fHtjwL94j76B6TH7lPo32JyQWuluYFzEZVaIGDn +6QnGBXW4vme1J1fCv4xm6jmp36sz0UL4zF/c53ef88qaAbwgxHyLoM+dyCJlG5Bs +Wa8Tae9vccDGMVz3VmSSUSlByGMRtGV0OgYueyQ3aLx54j94RrVDfyru8kITJ0nI +FcTdnG7wDOPbZIWGyyPb29NqgyyS+a1KzZidQZNjU4/hcE0aOrb4B/29QO6qytKA +PVIxtHg2V5p9VbuIYVIg865iZhozZ0/f24GDqTsc5zJMh4rdSkWEzZp1MR9sHzAe +DM10de5cidoVVKw5/kwdDPd4ulkKAjswjbGiYnWrNCvGydWljnWgnJ1W46vRUNzN +AOyx5TmXIOv+FkZrdsedPrmyVZqi7fIXuguO0sYlI+ZZqfWoM8Nu0awYjbfYeJP2 +qL1i+9s97sqaS7zfqc/Q2oP/kM1gEwd83pQKs3W1iA8c3RsXyikFbHVC9Ma3ljE6 +JLV7HxZyMD0sBU/nrxctjknEt24WHjc6WXtxtbaOSPqdzy5iipNxY/OGsgBc4dge +fngTZsyqQWW2Xy/K+zU8/cSpywIDAQABo2YwZDAdBgNVHQ4EFgQUycazFgiBcRE2 +SSL7T0Ouo+bgWcswHwYDVR0jBBgwFoAUwVIsIw4HTdS5VXLvL5fuNUjrRl0wEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggIBAI5w90ZjZVs9FdAV6gmlZXv0/a+ZJaHlCGpIp00Tcdx2WIp5ZFg52O8Y/Ncg +ynEp7KDAf7fW2u6uT4m3vCvqa6OcyzOnH7U5jsPkl2AM4GaFNmWG1LEVm0xYCRS9 +6JogbFDALbX7z6VvII4XOnRdi78+ALpQ6z4pZoJ8pbjAURoDD/B+VT9+lrqJT+YW +IHDkIgUYaRcJsbxOA/UYA8VwD9JBJiFQM1S2vLgk/VuokhcvfYrCK0q0+gIeLrCm 
+2F+Dr32AYeGy9KxgVC60IyHoD2dGvIv6XyEeM42Se3iJunFLOkt0OXmGRKxmAjXv +aCXh6JlOUgmPDwsYnLwH9+WWF9Nf/ypSIXQSyc+teUbwtNyp8z1tRT3tI8wvqjOs +Ocz8dLuj/+rk0slX4s6vrf9bXUOY1XMB43EABHxEEXcodENcQlhfmU6SKmXnygFU +knUtaRn+dcPXTl71ee8r/POcxxjGayQmwW7X8/DaZeDar7k/lyw6DPVspZh3S22o +8twB577Pcr8y+yOMYzmaS8f04jqjYYi/4RghUOnpWRVIkCpu5UWyWt4hGbKHuT7l +P/tk9Y0CizWLPSbkn0JsNUkQmfT5VAzchw9osJ4jOCy47G+lR55AggD0RQ0g06D4 +k+F/2Ir2a6Vp7ly7JXJ31mtWMDNTZL4sibcaT9UZ0PrHku9b +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0zCCA7ugAwIBAgIUMExeE4kaisqHJV+Gkg4LQc4FchgwDQYJKoZIhvcNAQEL +BQAwcTELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZE +dWJsaW4xGDAWBgNVBAoMD1RlbmFudCBCZWVmQ2FrZTERMA8GA1UECwwIYmVlZmNh +a2UxETAPBgNVBAMMCGJlZWZjYWtlMB4XDTI0MDQxMTEzMzQ1MloXDTQ0MDQwNjEz +MzQ1MlowcTELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQH +DAZEdWJsaW4xGDAWBgNVBAoMD1RlbmFudCBCZWVmQ2FrZTERMA8GA1UECwwIYmVl +ZmNha2UxETAPBgNVBAMMCGJlZWZjYWtlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAtfsJJY9wlx1+eofOJmZZOJyPxRgWUW2gdeWPt2YyApReThYk9i9e +u+m3Dl1SglaLxX1tKwP8ODKbf+MVrz1YMM5XPoGtVhbvnSNzFnalro4dU7qlBGSw +8PVMtCQGMiLyVq5E+2qfExSICAroL7bMjJ8MDHbj0+evEWvzGF3wRItJ2LspndrE +KbcBpl0ClpvezE6/vn9kaefk2PQVx9T/0wkl5ptCcjCNlVIQGWF4cAzqmXosfNU4 +yNn1lnglPMbYrNpn4OgR/7mPxjNsQpjqGS2WQG2xRFyhFdPKS1ymi6qMESX0lvHF +E8m55CXxI0mcWCW8Pa5wwccRj6Lif7VKl8XCa3fStbpmomG0KILQupTtWomSflP0 +x6l+jgJ5iLVVZGlwbnad0UmdaOGAK9z6GQZO5SeOQEpWB0NSlvxJ02F/8oMUUU9E +pDZyXIsGJ4HbOWGa5EKvUO/jKuoYnnTRA1cDiGnDcxUsnhTED0LLe6a3pCdKSRB6 +2dfyu0O4PMRCH7XsNC8jFdyk2WPwWeE10V82mySLABIrozaMl/vF0/ZA3jPzKRtO +b1wb/ncL+KD1ae3o37JGwEnlNblF/+XsRsijqciJ1KlkTiZWtxGlx2vSe3C4kc/n +aacTdxK4tPYOXmCNRRRQMZWRHVNEtTsX95FTnbRLZRgm9/sw4lwpk5MCAwEAAaNj +MGEwHQYDVR0OBBYEFMFSLCMOB03UuVVy7y+X7jVI60ZdMB8GA1UdIwQYMBaAFMFS +LCMOB03UuVVy7y+X7jVI60ZdMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgGGMA0GCSqGSIb3DQEBCwUAA4ICAQATrrJdISPLpQ4pmM/i4PCHaLfmEDpgqMZn +KM5LDmuq/RIK1t+WWiig5+5YbFPvp5Pcp4iVlIaoejcH0YNOSN7MWgDbsO+LIQHC +YeJ7GP8nuKxxvILW2djbpjPbIk+JWaENS5u2vu4HWPfFgVHbosvPZ1LDZ6Ts/UdO +VX5OYYEP4Xc6ny5TQIKzitEPAmRNRk98lYduyRxU8hu/fWMU3/0PSydx8dulAaXV +yVhIP6gZ0vJCNbv9aTxMFUMk2Pwms+iY43PvHxnDL7vNF2MS1qGVqVA1RnHp3Trr +F4UFiXwpPlRbhh2OC+ap6PYYFGIJ3J1OVkFgDGVCx9vhT348lklAvLPrwI5TmHAK +AAw2gC89Q1oHo+QvkvoNdPSyXfXWyfj1crlOyMZPcgVRrDSVT4B4KkJ50xeLtbzf +qEImrLfRRQeneDeIavZ5fQ5O4fTQ5RrA5H9r/X9fJt+19LZbG6VEJWVBCplKjXHc +4vO68WCwQIiVef5cdcijx9dvmtoJWazjOvXzeN6TvwQemUNxPn93b8D6paGStKAh +CeK7g8zVhwIPMyBQzPKZomvbyx+Kpbc1ks2ESQz9uq5cr16hYb3W0N4oigKXB5mp +csWUbbmW60qrFFS60w5pGgUDG5WVc8+bMnJwUmY7PfK8GRHfrFS8Vn5XrCJ8lp/6 +S/xDSGl5Wg== +-----END CERTIFICATE----- diff --git a/test_certs/beefcakeCA-gathered/beefcake.intermediate.cert.pem b/test_certs/beefcakeCA-gathered/beefcake.intermediate.cert.pem new file mode 100644 index 00000000..d053241d --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake.intermediate.cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZEdWJsaW4xGDAWBgNVBAoMD1Rl +bmFudCBCZWVmQ2FrZTERMA8GA1UECwwIYmVlZmNha2UxETAPBgNVBAMMCGJlZWZj +YWtlMB4XDTI0MDQxMTEzMzQ1M1oXDTM0MDQwOTEzMzQ1M1owbTELMAkGA1UEBhMC +SUUxETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgQmVlZkNha2Ux +ETAPBgNVBAsMCGJlZWZjYWtlMR4wHAYDVQQDDBViZWVmY2FrZSBJbnRlcm1lZGlh +dGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCfXyeKVpCcBvPcVito +0ZT6NB82Tff3K8nH1WotOyPKKq4lgS/q/59WbjNIdZ1i7XL1hWBndSaFqcSS4mP/ +awIqDKRn3KnKvg8xIvrzvIAW2fHtjwL94j76B6TH7lPo32JyQWuluYFzEZVaIGDn 
+6QnGBXW4vme1J1fCv4xm6jmp36sz0UL4zF/c53ef88qaAbwgxHyLoM+dyCJlG5Bs +Wa8Tae9vccDGMVz3VmSSUSlByGMRtGV0OgYueyQ3aLx54j94RrVDfyru8kITJ0nI +FcTdnG7wDOPbZIWGyyPb29NqgyyS+a1KzZidQZNjU4/hcE0aOrb4B/29QO6qytKA +PVIxtHg2V5p9VbuIYVIg865iZhozZ0/f24GDqTsc5zJMh4rdSkWEzZp1MR9sHzAe +DM10de5cidoVVKw5/kwdDPd4ulkKAjswjbGiYnWrNCvGydWljnWgnJ1W46vRUNzN +AOyx5TmXIOv+FkZrdsedPrmyVZqi7fIXuguO0sYlI+ZZqfWoM8Nu0awYjbfYeJP2 +qL1i+9s97sqaS7zfqc/Q2oP/kM1gEwd83pQKs3W1iA8c3RsXyikFbHVC9Ma3ljE6 +JLV7HxZyMD0sBU/nrxctjknEt24WHjc6WXtxtbaOSPqdzy5iipNxY/OGsgBc4dge +fngTZsyqQWW2Xy/K+zU8/cSpywIDAQABo2YwZDAdBgNVHQ4EFgQUycazFgiBcRE2 +SSL7T0Ouo+bgWcswHwYDVR0jBBgwFoAUwVIsIw4HTdS5VXLvL5fuNUjrRl0wEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggIBAI5w90ZjZVs9FdAV6gmlZXv0/a+ZJaHlCGpIp00Tcdx2WIp5ZFg52O8Y/Ncg +ynEp7KDAf7fW2u6uT4m3vCvqa6OcyzOnH7U5jsPkl2AM4GaFNmWG1LEVm0xYCRS9 +6JogbFDALbX7z6VvII4XOnRdi78+ALpQ6z4pZoJ8pbjAURoDD/B+VT9+lrqJT+YW +IHDkIgUYaRcJsbxOA/UYA8VwD9JBJiFQM1S2vLgk/VuokhcvfYrCK0q0+gIeLrCm +2F+Dr32AYeGy9KxgVC60IyHoD2dGvIv6XyEeM42Se3iJunFLOkt0OXmGRKxmAjXv +aCXh6JlOUgmPDwsYnLwH9+WWF9Nf/ypSIXQSyc+teUbwtNyp8z1tRT3tI8wvqjOs +Ocz8dLuj/+rk0slX4s6vrf9bXUOY1XMB43EABHxEEXcodENcQlhfmU6SKmXnygFU +knUtaRn+dcPXTl71ee8r/POcxxjGayQmwW7X8/DaZeDar7k/lyw6DPVspZh3S22o +8twB577Pcr8y+yOMYzmaS8f04jqjYYi/4RghUOnpWRVIkCpu5UWyWt4hGbKHuT7l +P/tk9Y0CizWLPSbkn0JsNUkQmfT5VAzchw9osJ4jOCy47G+lR55AggD0RQ0g06D4 +k+F/2Ir2a6Vp7ly7JXJ31mtWMDNTZL4sibcaT9UZ0PrHku9b +-----END CERTIFICATE----- diff --git a/test_certs/beefcakeCA-gathered/beefcake.intermediate.key.pem b/test_certs/beefcakeCA-gathered/beefcake.intermediate.key.pem new file mode 100644 index 00000000..9bcec48e --- /dev/null +++ b/test_certs/beefcakeCA-gathered/beefcake.intermediate.key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCfXyeKVpCcBvPc +Vito0ZT6NB82Tff3K8nH1WotOyPKKq4lgS/q/59WbjNIdZ1i7XL1hWBndSaFqcSS +4mP/awIqDKRn3KnKvg8xIvrzvIAW2fHtjwL94j76B6TH7lPo32JyQWuluYFzEZVa +IGDn6QnGBXW4vme1J1fCv4xm6jmp36sz0UL4zF/c53ef88qaAbwgxHyLoM+dyCJl +G5BsWa8Tae9vccDGMVz3VmSSUSlByGMRtGV0OgYueyQ3aLx54j94RrVDfyru8kIT +J0nIFcTdnG7wDOPbZIWGyyPb29NqgyyS+a1KzZidQZNjU4/hcE0aOrb4B/29QO6q +ytKAPVIxtHg2V5p9VbuIYVIg865iZhozZ0/f24GDqTsc5zJMh4rdSkWEzZp1MR9s +HzAeDM10de5cidoVVKw5/kwdDPd4ulkKAjswjbGiYnWrNCvGydWljnWgnJ1W46vR +UNzNAOyx5TmXIOv+FkZrdsedPrmyVZqi7fIXuguO0sYlI+ZZqfWoM8Nu0awYjbfY +eJP2qL1i+9s97sqaS7zfqc/Q2oP/kM1gEwd83pQKs3W1iA8c3RsXyikFbHVC9Ma3 +ljE6JLV7HxZyMD0sBU/nrxctjknEt24WHjc6WXtxtbaOSPqdzy5iipNxY/OGsgBc +4dgefngTZsyqQWW2Xy/K+zU8/cSpywIDAQABAoICAEozIrUpPXWvVP1KTzajkVF2 +T+vYHDFDfVQhPChn5u9atK64eYdXUSeL8itzuv2SoNnHqc0xQWis1F0wsy75cs4C +Cc9jcNf4ahlN+HnfUuGQwi8AzPtISVrekS0WfJGo42xfog9MEYVx88+80HFY0diU +tSz41U8FzLnatbM6rPM2tD5RWPV+LcmrKMpbVoIieaTl5jiZ+9+xjLX3QeP4U2de +BICl/QMpe2eanQN0bxl2Aw38p9mXpEYixgdYJ5w1pqhIf184B1StjO7e9V6HKBun +mA0Ca1m7cOGDlaPosoNeJDQfs7tmgn4AyG7PumbTAsrJ+GL9UXk6EISLHrtfEXNe +82oxoLJHX7YKvrrEjTSZcaVVfuLXaaxlM3+qsi7cQ2lz0+doCipFewgtROwpzGkS +natR4yuLSD8au+lly8bYDuWjFABRpuNKIkCNb+u29cjz6slAfeNmHs1UNAaOptte +b4UhreBu7NnGbZRKyMTM4Qk/iFlCmP6Yi2dDA5OkzHfHSp2UXEnLeLptdmAatCZA +On6tTgQBH7Kl4AEPXpwjJKPB+41+XHB2XQ3aOc0Me9hnTM68NgmzehnFJoRPQgwb +tJr+oV+hQuF+hGgPlwjpWpglW5n0SlWFuDY8XEtTcCX/xtFpIDtW4H/JKUyB9lsu +kV96IYY4G0WwzdaNd43BAoIBAQDdwnDtsK5zaeJMozDBxfbLy5OD3NEfU7KKMBxr +LEvRHdS7mbxg1NpIQ4zXK9sRY/r+y1bqmHDnzHTtS645Dsoz5zk8czRnD0dVwf2K +fDo4sHth9v9LBioD7K94RxRSN7sOcgEUgowKcmSbsrBuq8E9yhH8QlpPQjshz5L9 +VlL0qOY1nszxtpnuzYdMoTM6khDyQrhmM6L0OjoU1AU6d5C40MYTBq7OAIP5avDk +ZKeCrJVUZAdsE5r2xnsrcSlEHTa2KsnquAyt8bDMyT9+xoVd3ShV6wuyPmvYUAIU 
+1VkIQybcbqW00FSkQ+2bQSyOEjY+SfV3Z7Org4yLfdE512rBAoIBAQC3+rCLETzd +JHPXgV8+IM7rDf541OUPNeTUl0m+jaGEEAhZiL12vRYuxkEH3afWInVXapbYHZUm +1Pf269WYXic4kQmLM3Y3PqMCsCPynbcBLlxlCX3fcGw7JlrX7oCTtnhVAQTzOIj3 +FOE8o0J2lTvsF3Ghy8oYSiIBXVVS5H3CW6Hy+yhMuD35Peyfz8wo0/Fxw5aazYPw +uyjbBaVUf9VPBqZAqYyRo+jD/P+RgA2TLJYKhK/MUKRb9C+WKXP+mUERiW8RCIMM +BYE2P0r77w4HuLoRG7CJH81WNM7S7AWoNjCghisgBhdEhRwP9Ril+bt5F4cY12r0 +dvcB6GpkGXOLAoIBAE2188j3n0qGkmz0ywM8kk28W0dcPFRUCj7uuSa9cRwo7gGH +0EHGlW+5bEOC9NPVS4G6VA0FupsgROFbIgSQsD8cCiyq2nQ7IG5m+qhq/CQdhD9C +RRhPYq3ZZWwqzG47ZWuce9BqJxUKHUOKjNVbju5OP5s8zVT1Cq/Ag+wPMibWgUXN +Dlit+qd6kq1B2CfwQ8wBoYj/E12ugegGnIZum+KIFsFYGC3MdHMVDLGcrC7Yt8PJ +AejXvfg4ebUxhJp/+kpAGF4OfTL/fUtmQr/0ZYwNEjN5mLGvui8lanVOhlKk5Ztd +kugYEXE29K7xWeG3dBbD6CcdyhKSmM/RGV7BpIECggEAb2aBbcOjSfhfmHdCt3jg +Md4IXxvyEvK8O6nbwmE6EqAuniA5MOOvweTTzbJ+hR/aU3M8IWJQRPIhr0b/z8H7 +F7piwAIKK8tmoyMKsUakYDe1RIypxhjpwCsqSKOnAUo0tKvoVmm+LXZcJ/5nKSyV +2xAbAzBFj82bp53fI1EtgPlBJ8LClSnehMPlOqN1aKdlBFC9KcEVijeZtn1FrdO1 +0qMhGfhQIIPSVpIkIfMaxSnzNC1B0YJm8zL5eYBiNyCPENJaCUNRLn9KzREoE0Ke +CTjmmaNdkrpJ+FADiB3YExG1Ag4boMlvX1MGOfdb1GxSAzGm4PjXT22VhXc4rKHo +nwKCAQAkoW8qyhQeUEcbFJRv5caM3nu/A1lHHOpbAKysbhB82fZZI8fbnaZKtytc +TVo5tsZXeMB5VdW/NDuOgPIIRzf3YuhbM1IT/j6ppHm4EHRLdQMDuOoFsXIS63AR +S47sqfcIzERHiarkVH96dPvPw2IFiTIpfYqIIy/rmCTgHYhcBanvcJVm1YrZJ1/5 +ZQaeJjSL+DwvnZoBov8lTn+kQ20zFv/c+FgA8ApIDShHdvctxCH7ZDeAJ6cUOFkx +DxMOVU63Z8GIGXoTSRpwEayCG7maBYdPdHnHjPhR+G2/oBrTN+SJE3ZrNsXwmh9r +AAjMdHdL9kDDxaMwkD3G2LzNHR3p +-----END PRIVATE KEY----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef-athlone.cert.pem b/test_certs/deadbeefCA-gathered/deadbeef-athlone.cert.pem new file mode 100644 index 00000000..6901bdff --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef-athlone.cert.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFPTCCAyWgAwIBAgICIAEwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgRGVhZEJlZWYxETAP +BgNVBAsMCGRlYWRiZWVmMR4wHAYDVQQDDBVkZWFkYmVlZiBJbnRlcm1lZGlhdGUw +HhcNMjQwNDExMTMzNDIzWhcNMjUwNDIxMTMzNDIzWjAbMRkwFwYDVQQDDBBkZWFk +YmVlZi1hdGhsb25lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtlgB +g3dX+L9FgQDU9JqaZIddfUaaj4kx1rI9e+FOL54omRSiTA5IHuMg03VQB46g6Ko6 +zo4cPrLEm+CekNJOjgvX5yUbAvSoFFgWhYbykA1aNMV+ZWi/W0AdOiH/AZC0kd3i +hiUoC5104Jnhu6oZrL7x1KXwDljOQbMCYbp9k4Vm73mPmpEY1fpIQhmSlm9czQcn +2KYFy7KG+RXhhjqjZKWiChCcH7scJME3Tu09j8STke2Zs9fZqxN8Jg2vgCIxJBYg +8E8b1+soHx5vS1n92AQtJM8MibgZAD0S2qDoQs1VQqTfTARly4DR7TRGO8X5mM0s +dIYjgkW6OD9opuvXMwIDAQABo4IBNzCCATMwCQYDVR0TBAIwADAdBgNVHQ4EFgQU +/YqIGUOKBGLWWW/TbVVs6p/6gI4wgZwGA1UdIwSBlDCBkYAUVs/rs9CA+viiumd3 +mv2WjT3LueehdaRzMHExCzAJBgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjEP +MA0GA1UEBwwGRHVibGluMRgwFgYDVQQKDA9UZW5hbnQgRGVhZEJlZWYxETAPBgNV +BAsMCGRlYWRiZWVmMREwDwYDVQQDDAhkZWFkYmVlZoICEAAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA5BgNVHREEMjAwghZh +dGhsb25lXzEuZGVhZGJlZWYuY29tghZhdGhsb25lXzIuZGVhZGJlZWYuY29tMA0G +CSqGSIb3DQEBCwUAA4ICAQC9Y6lZDtb1qSwiIrczUaAZjzv8gyDfdcuqi8nq3CCV +TNH6SRDc4YEpZeRJE+PrKdUNEtfwnCf/3VW7TVankkIq0B7gG6H2M1kTW1iMP+0b +5Jy1GXRNZoB38A+ATCusNLEOsrBzv3DwuL9uR5ct2EaDaNPUf1eEV+AUce2EaKxY +leOf7gahHq4GxcYj9bFKQVn2sv/DpTv/7eJfD1/KFKyWc3u10P9jbA0TJyEENWwk +kXVg2G69nfkyizzz1RPFhufNAiqqQx0GKafQkq75MjSmSpETPRpFgrh2zUsMf1Xx +LcP19UIOp23lcJPKxXIwFC9eQej7gGpESkm5wqQdPuloU3/lCOL3u8vec1GAqDQ5 +Eew9jg/FJy/xqPLVUibv61IS/Os28xU7EQMOsFJmwbshUYWYE2mASIDBS0wicyLz +/cMNlKbkiBCk5ywYHSKkXHz3MUJajcjMUlZPPW4nZdvtzrzfCdPYDLkOFFNW2T01 +NzZac/wmU94Wb7Rta1d57JU2uI9uu9dybnAzI73s3bbzQdAjaVP3KxQQ5iwrDmIX 
+NgEblyElDIbLXonfpX7plxErAesihBbrrdOpWsdW1SiqVNK6q2m7GUWxMJB/3UYF +u7Ye5QmOrc31+85W4wFLRainW4xE/y8TUsIytOFRv7sOrOV5KFyNZJ+BF0kwWd3l +gg== +-----END CERTIFICATE----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef-athlone.key.pem b/test_certs/deadbeefCA-gathered/deadbeef-athlone.key.pem new file mode 100644 index 00000000..4ff33b51 --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef-athlone.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC2WAGDd1f4v0WB +ANT0mppkh119RpqPiTHWsj174U4vniiZFKJMDkge4yDTdVAHjqDoqjrOjhw+ssSb +4J6Q0k6OC9fnJRsC9KgUWBaFhvKQDVo0xX5laL9bQB06If8BkLSR3eKGJSgLnXTg +meG7qhmsvvHUpfAOWM5BswJhun2ThWbveY+akRjV+khCGZKWb1zNByfYpgXLsob5 +FeGGOqNkpaIKEJwfuxwkwTdO7T2PxJOR7Zmz19mrE3wmDa+AIjEkFiDwTxvX6ygf +Hm9LWf3YBC0kzwyJuBkAPRLaoOhCzVVCpN9MBGXLgNHtNEY7xfmYzSx0hiOCRbo4 +P2im69czAgMBAAECggEAOFtf8GGQ84Uudq4qo4UrJdn+p6sMN0PIfxpszV53+1iv +JI0PkmExojZB3YlbGFLZYb/HWDLtAOXgxWq5p3oQpG99pee+fv3d+xe0skVgr+wo +jrnhlHc4+XuJ7P834SR34sOI8TwHsftS9d4pugZbVBxaAwbceJqy+PRaTplEmbSx +6pNOn4xoRJWnvOMt+7R+pkODV9TyBg5ZaYdKOe81AHR50lT4QaDbenhHTosREV/V +IawLqjGRHCUt5aiHzZYI2IOn20kt1SOejzJP+Riu/jGfRSbf91hr33lMpP7p2QT2 +gkHJ4b9CxWE2v8EtJomkFOBRiBzX3+IsoYc3HthHBQKBgQDGGEqmoOv6ZhmG1get +RYVeF9bqKNYHf9bPsTMZcbkO/p9mM1yZToq5fIrdpqToVVnoIN4+kPhUdPY6oUXZ +QxCBFMiSGqIAXJjX1T/4x4LfYUjIITdb4nzBp+8Q6jx+axmo7LqcvxNbBtKpOGsY +cpMnhgl9cylDeDHubIpMhNCUlQKBgQDrpQl44SNzJADg75GUCf/ciUR3tLmoE3+P ++fq1nMNdNdV4gy8aXIvktbnZy6P2APtpm397G9Sdo5XvPMx4KX55j8/H4sNRboAW +gejwKx9Amh/ygWA0klKf/H1mjptrGdkaFGFm2RZExwTE8brXdKY4vGgPySxk1GT+ +AY/uhrrCpwKBgG9S63Dk8niPtDzAPyQy85ZDU3XYaR9hyniBnrPn4xX4JeArcRh+ +uRswUGHr6rTecl4vSnZixgvAhze/th84vyerZGQnL2+AmiyBHShUBso+Q8akVBYB +HBdApuscl857pbn2/5BjGR+IoA6VNCpTCTum6eJkO4Y9H+JbgQUrhoRZAoGANoUh +sukMJaiv11s2aHUD0Meloriat+uC+RHSUtCpVtiMwmw/qq684za0gFKgZduGT8Hd +deNThwGBQC/l8utOytteLJYnsKCLXCnEdkTT30OJnrcWRjOQkFl/vgnCpb3+QA11 +F7GkeFc/oD15iD4B82CMBxL5hX6quyYrYfmkHkMCgYAB106DR2PPx2FznmNtN6Ja +AAwdXhcFm5GzoSVyBblL603g1V3Mfc0L24lWEBQACUOL0wHrHhLf6RXXRcS+eVYb +zT1nAsoKVFlnX6nXD/IiHUBb6fYItg1v7MXEYgb9t+5lIBVpjA5ZvOXvF3kVqleC +gKHyqrz/4ETxdM6jD+FF9g== +-----END PRIVATE KEY----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef-dublin.cert.pem b/test_certs/deadbeefCA-gathered/deadbeef-dublin.cert.pem new file mode 100644 index 00000000..ed8805af --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef-dublin.cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFOjCCAyKgAwIBAgICIAAwDQYJKoZIhvcNAQELBQAwbTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgRGVhZEJlZWYxETAP +BgNVBAsMCGRlYWRiZWVmMR4wHAYDVQQDDBVkZWFkYmVlZiBJbnRlcm1lZGlhdGUw +HhcNMjQwNDExMTMzNDIyWhcNMjUwNDIxMTMzNDIyWjAaMRgwFgYDVQQDDA9kZWFk +YmVlZi1kdWJsaW4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCLdzoz +dy/4+Yn8mPKYcfrDEGX7W8yz6/dePuIEzPrFB3RucdDlAG4GHa9HQIm89OyIqPqL +xc3Kzt64b5pBlnRELz9LaN8VzYR9mjLRKcWbX3Z2O2pHVX5RqDuUQz85fB5f/0v2 +XKW+jmRuLDcG5QcK8Tu7GGPPcfl6WMNu5JW5IiJJQnfFK203BVAXRybsiXGVlPOw +Wn72vDza2yKidQR7Dtv+9jvVLrKWxPRf79L2MDOuZ76XS8RdSWAPW7u3rVNZVIkl +tnTHpB+Nt3kdtySMz6RsLY9JBAniv1Rm3B6I50jR/ZEBv3QXFmgWxij+nc2iQhAB +vUfZRrkEPchKbrUnAgMBAAGjggE1MIIBMTAJBgNVHRMEAjAAMB0GA1UdDgQWBBS/ +z7yCAXICvjkhx49FqrPPhkmZ1jCBnAYDVR0jBIGUMIGRgBRWz+uz0ID6+KK6Z3ea +/ZaNPcu556F1pHMwcTELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8w +DQYDVQQHDAZEdWJsaW4xGDAWBgNVBAoMD1RlbmFudCBEZWFkQmVlZjERMA8GA1UE +CwwIZGVhZGJlZWYxETAPBgNVBAMMCGRlYWRiZWVmggIQADAOBgNVHQ8BAf8EBAMC +BeAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDcGA1UdEQQwMC6CFWR1 
+Ymxpbl8xLmRlYWRiZWVmLmNvbYIVZHVibGluXzIuZGVhZGJlZWYuY29tMA0GCSqG +SIb3DQEBCwUAA4ICAQCDCQwEdmYRB0yid1OC2A1MBnLjPz9wXy5ShMcfVNUj9ZjV +hkozrDb02rzWLyhUUQQgIkwVK4D9XLWYnaxywEXVW/XFEIJoL2k4A1oV58H3zSNv ++IMOTeE2cGeRM+szCALNpvpnAsFZ32cqrUirO23ypgRa7ELe3NtnE7wM5KTmXWOA +bg37vjqsoT5vtNsiWXDSbMaw+JwLSR0oUcISMyf5x2JffHtoYYOrOgNVKC5be/3D +u1fi0TvFkJ0KKSCRhYfq3Hi/PUppnnsEtOWyN+Kv4nD8Iw+kewRRhL3Bci/QafKu +a1Uriakq8LT5WFl9gAT0NyBBKpEujIbIE4vtgmVTmr/91mx9Mz2cNgEWjgP+nLMi +1KQBjAIfaw70FKrPO+ek8JfzN6HDBv6U6FCvJYd1qZkL/Qp9x+AdI6pYY7wxsprL +63crPLzW/aZtmCHFf6dP04Ss7kh9xDAf1EadGyR0+gNC7ho4B5KjAmohnOmRhR6r +52PgQmgiZ2vy+Nod/nAUhWcrUoVEPCx7+0rcrdANhXT8gVuILIVeINlBSHAPuzaf +jEreiuYhMzcFtr9DqI3qE0Yk1xTpsL14P7sfmEt9LmB+mbfImSKC2Y3z47VU8zn4 +NNWznldC1QjNvNjfDZp1cur0H0Uy6FzUkHSGLeRXw6eeq9Ge1Ap7RpNLna1VHg== +-----END CERTIFICATE----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef-dublin.key.pem b/test_certs/deadbeefCA-gathered/deadbeef-dublin.key.pem new file mode 100644 index 00000000..cef5efd3 --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef-dublin.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCLdzozdy/4+Yn8 +mPKYcfrDEGX7W8yz6/dePuIEzPrFB3RucdDlAG4GHa9HQIm89OyIqPqLxc3Kzt64 +b5pBlnRELz9LaN8VzYR9mjLRKcWbX3Z2O2pHVX5RqDuUQz85fB5f/0v2XKW+jmRu +LDcG5QcK8Tu7GGPPcfl6WMNu5JW5IiJJQnfFK203BVAXRybsiXGVlPOwWn72vDza +2yKidQR7Dtv+9jvVLrKWxPRf79L2MDOuZ76XS8RdSWAPW7u3rVNZVIkltnTHpB+N +t3kdtySMz6RsLY9JBAniv1Rm3B6I50jR/ZEBv3QXFmgWxij+nc2iQhABvUfZRrkE +PchKbrUnAgMBAAECggEABNwBI6Tt/3tWYv5DSauDBMPwAbuXnNdGdbq0Ll2kCTRy +ypxdnnUO4FitcEqf4f0YtsRIgpFAU+Y0XXHKCqn+4BdTWcHMMBrKmIPEbIRLldfU +LVdxow2oCplVhht3uv2Ho2mmUG22+rZEapBQ1IUrYya3oZO+eo5qlmg8WwNM8jfg +lg9loRrhNha593BFHSpM8OVMtorKHjw3YEq6YCOe1Kse9qH5XiMMP2r7Q7LMpK3H +WYx+yTCIOLgnbUt/x4vjHc8kTpy8JqsRjoRNlhTpb0xcJ2e9CZc/q0z2ZBAtr8t1 +OGf1pFNXsL/+9WNNIj3svumWh5Meo4AL94L6GYysNQKBgQC9GEp/xJsfLLy3bsK+ +Ncg/cil3zk5QjMD4/FuT/ymMtMEWiCeOFlU5e9Wd5d3Kn3RWeL+5aP47lZoFcXTo +8POFD7WNHWlv10SYMCts5lSozyleWka4S+0d1vjRMCPUwzxmxFGaFedGu0bwmeeR +wnd+fOvvpt8Zhco1fdp4v2n5IwKBgQC8z6kF50g9Vuqy/DxflZSCN8vw8S6Up33b +8YuItgRzUF73GYo8gNznwp+qCJv//TlPd7SmpxBx8A7aporUc+o37Y0Ium/Mq9ml +WEsubCHLe4dH8i/ijhHncarbN1QlrRl5+c7v4eyw9rk+8SctvGU0FwFSng9txCJ+ +h5JFj/oOLQKBgQCbHg8XCq/5fdh9XRXRtQrEySCtUAa8Wwp99X4mRL3P2ANBOYXB +MB7PXf9ZXNR5QB7ORJV6Ql+zf0Bu0KiH4gjSEZXr4cG5Zky1z2YAGl4+cFpONR4o +Vth3jv5jk+sAwPuOjeMm1Gr5ge1qt8XXs0AU+eMNn8UKCc9b3BbY0KTdIwKBgARv +D/YOh1jBaMOJPtWzyogATW68Cy78e+0lT/SG6bXXhlNhKyjekdFzdZJln1w0kxr1 +oa1kMPOsAn1J+eqhSjfEAVLaJfP1eSfz2dgeO/DuVq/u1QcJ9QutVKjPNM+pr/Z4 +Uk65zHgfYxxCR3xAd1Pw0lwhUBVwzKqR/nJZvjKFAoGAPA8iosys5HXOSfZtz5ev +Yu7ou0b1MZj13WPcPdUFZFSjE9mq4nsKJLqGVuwfvlWzVh9qyxxnV0TALHNFFgmL +9VeKRniCiMzF6g3DwMfHYb4GhvOUf2iYdIuS5XEk2ZNyOAipskPVaTXvSDW7wyw7 +9wISWQQjbg4OMRGjQOlkGTk= +-----END PRIVATE KEY----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef.ca.key.pem b/test_certs/deadbeefCA-gathered/deadbeef.ca.key.pem new file mode 100644 index 00000000..53ce5276 --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef.ca.key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCm1TrHbr9ls7fY +mlCOM/JdSMbQ49gVXj71buEBSNCfstLpZn4PLiIV6I2pK5WD2OAknpqOWTLDqXHf ++jXggP4kYU8eP+puWGJOxqiJLA0GMupBRTsWxf7zyYgGrtTRGyH8kXwvw/K6KAdS +EgAjgxt7wgCqcpaA7oHA6qht71u+TNir5ZCQEiIJDM8jarFu0MRTlvVIwloImaVn +p013SjF8u62EJbBrtVmDmNKWpdEcQsXCWO3LSyWPC81Ama7nzS4s+8KQsYXauhLj +b6XxH6trxxv/tbUroDMoCEUv0RKL0ZrpE0axz4WIGk6hxq7o1AEssTBrEpJTUw8s 
+rslNSf/vgvq4SistIRtBCGMtko3pmeQQDy0JrPdyyxjP/+uIMlvxxCkbcO7G9fFC +zP6IjYrugFsxD1f4dZ5whj/LmkPhmtW2nMZMIxSnOJ1GS0F51pwTblTfrHvmJN0U +U7wl5ML4peWXz/NK8k6+zK9JrOUREZ5VLU7NP4olsN/P5M4GYjwRbFr9oHF9dRVC +/hyWliWTdCF40npqqTwJaZyM6Udkht0cWlSKH4Zwfy58S6qn3TXOgOboAQsqr9X9 +wikRjypdtfZ77V3Cv9wDCI8qguj6Td0HWck9d6jlMEGtDoCQN6m0ZDuH/sOrVgKe +/5FjnwPYD3U2y1pvLxGsSS9z2jvJqwIDAQABAoICABMtcK/LKcI5mfkSqA7opsQV +YRFTF5xBNLCG2eOJaL3q0vbUesoo7e8hx+L56Rp6XC2pc6/ITh8Sw83Q18+iVFR/ +QRaDl2Gu1EIk8OuvviGHycjN+gBgqzF/6ZyHDVyqJ0p+uM1O04CVqF3FRvHOGOMt +hmERgf55LD4uW0Uj3iJud/uujGMbSoFK0SbLlXDdG6xWLoT6HlpduxdCspNzWmPx +uPz3TiPbeVo4v4BkuDhs7omsKk2TqlIas7Ho2KO5wMSX/iIe3fmqKRCu3V7t2okt +nqjSzOwiYq8H4FgiSuT6p7mCJz66Uam3CHHtQl7uO+JUzfGI9XwRkK18vBQf3QUN ++7olFgs0yt9SAxR0ydi0nx0GfltMFi4FnZ141Ie807DAZVnFDCuRoCPI0fgquV4e +KuyeZIzRacLP94+Vt6XHw6S15FPOIIaIILM8jOmbXSNcKYg6p7RrEDEFACE0gSZB +8aFwTFD/TvL0ivLR3kqRQjGUr8YF+2t1MYDer/ruYP7J3mBx0IO7sHGnF161L7yr +JqLrUyBVZqSaN3vyMJGP1liAM6Fz9icWWF76fa1XV7btb1FdfiLH3giLBrcfqZ+0 +jXPxw3UTzXo6eK1oB25Qka0i5nYfEFyWLqGOJdbSqeNXL69k//bigzuiQ5zbE7A7 +faGHWH+TxxnnHKpSHmMRAoIBAQC5zDi4jpEHLPq5WYgoQGsNxlbrxsPAaILCabRX +b6iY9E/67GqdGtTeo64QDklJYg2IeJlEzCNi8kOrw5smuMoRPwap5ZRQf/+miUb7 +5BPBUz8s6s40nVKpfd7/vZaAtsBTylTloAiAraTxO/ldjqRaRLMiDw4NH0A0Cj1j +rQkMQK6aVTD7bgX/J5VZJjeLc9VPB5eVcU0Ucky9AUZzKOgx3hHPkh99xQe/JjKL +q6BF21sWc4c5RK3L9zJFXd3gS9J69vv/kCa5JoYNdRh63UlYeoxc3p6re+8xpum0 +Rz2aiDN/lqidRMQiDbyzqF7aa/RtVtgnQ/Nw4Dv3x/eS+BhbAoIBAQDl3pYa21jF +vSIsXObrTaoYhFwGc4QculzDzHtrhVobiiM4B8r2dQf/w4RcPD9PhnyAKqzQo+JT +tec8CJC2LFn5cQUFNaXzKGJLRQmMBE+PnBfith9k8ad86hIznzNuo09fwP5PGxhR +ylh8SdOwVYsYEdo4Mc6qttSLQsC61apbkw6VsyVHaaoexte/Dzf2CRAyer3qedO0 +Ut6TWvWqE41TdAc10W5d369hf0Nze0FB2pYJtsDV7bos/OfTeM77oA5pBUsupK8h +ylW3iGDyUetfcQAkXaFPs6id+HnkT8BkHqHEjC6nTHHHzmlw9W9Phn9DPy2flZgO +o2x4iBwrPVTxAoIBADWllP8dEx0aTK61/k15BDAvA6wCcUUeKBtQgCT69Zhseqd+ +Ge6Q78P1SO4T4e9RY4zzHXYy50jLoOR4j1Tn6FiAdUEbK+7udne0jY1vtpCMo+Ip +Ti78+gA8euYm7Mzvd6r58fd3dN0KxmRtJ7xu0l/KFFsSM8FwM46TIlSkgsdJ6eBg +FsuSYiLhYAsksMcQRBgUnh0lYNc1hZjRRHE974VulH/1tigPewQjWFbh0F47Txq1 +cucDQJdzIc3Lo6P+4KBmri6FhdjSGcTK+a2INYRZpiuuj+v91fvlMPe/1jbHeu8t +JjR6xZL7mm/ZfsHch/AGMlVsFl+SL16FG6QRBJ8CggEAGqHcUEZDlb1g/TTSSEba +QzbqlpDBqLZq0wCY+A8eLnylsfMtL7rFb3uOlUUhrKwPNqY7GX0zVrGHlewEICcp +UX4nr7/Ud0Shhsj73y4Mrb3PAqRJJN/eoNlZvINgaRwgsR5iGHYyLh+8esHu9coj +cGl5F/OlVyARpgTY4YypzSxPj0aPNiPm4uP00R0JRcctCjwwTr9IqObFBNuQBOF5 +CUczIqgeGmA1KJ2fCf0oxWF26q8yzlZZsKaokVa43Gxh3wcO+9t2svqHTdlOKiUV +mRw1gQEcFGjjS1IjhTE8QB1Qbr4zFY0Ezbp7euvktpkFm5vxREEHugJzKc/TO6tC +cQKCAQBz18L9/wSx9ZeaiTEGW8v9+JAgX0qfsUYSP2k1x2eZr6VooldJFJZvquYA +zipiRX0omDdqVOx+HAkwddKmKOvEu9kP1bKVUFU8FTeslEVszVUYGlGIHdGPxQyV +XVvAobefGh7+hrG53M2093ZGDnNh7Qh7JwH8O+lk0h4gclPASt+llXaPY0CSUAoA +gjQZrpgMqora3wpqnX9IU3kNgaTV5k+4PZF5LjWqpaiwqG2hnv2OxxxgRMB/+82x +RplsKOKQw/wzP7NIOJAKKBs05IRCorIwLJmZu6vKRksPFlMPpVBjCNp4js2u6w9Y +qIszzqu2fJ63IijVIhPAVYiQH7ok +-----END PRIVATE KEY----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef.intermediate.ca-chain.cert.pem b/test_certs/deadbeefCA-gathered/deadbeef.intermediate.ca-chain.cert.pem new file mode 100644 index 00000000..32b74432 --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef.intermediate.ca-chain.cert.pem @@ -0,0 +1,67 @@ +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZEdWJsaW4xGDAWBgNVBAoMD1Rl +bmFudCBEZWFkQmVlZjERMA8GA1UECwwIZGVhZGJlZWYxETAPBgNVBAMMCGRlYWRi +ZWVmMB4XDTI0MDQxMTEzMzQyMloXDTM0MDQwOTEzMzQyMlowbTELMAkGA1UEBhMC 
+SUUxETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgRGVhZEJlZWYx +ETAPBgNVBAsMCGRlYWRiZWVmMR4wHAYDVQQDDBVkZWFkYmVlZiBJbnRlcm1lZGlh +dGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDAJpLiyPN2uxEB+7Dk +cQ7i3hIl7gziuCTs2+hm3SkBhFcojmzBi0w6te+1a8/1eRju6zDMsefZafQO8eh9 +OByDYbeH5m5urfyA6D5AGhfqXFb1VSbMq7ClBO6Ic9t6HfGjZKYmMNm6MEufCLjt +JDGBJXraC21YpsWyLCWLg9BWEaSmd/pq4dwQ33CWV3tyzZSxrDE0jW7V1Xxzyx+/ +xN1EsJlUTNtZTQ/ldqfLrtEvH4bBgpvhoyUxq/EH1XXrdq2h/9Qbmt5hNqKQzuPQ +SWUmW3Fdyc6HSf7UiLee1sQa6ojqw/Xgcvixzh8mirc9TwvJh1Hg6nJr57Uoj3pP +R4Itaui9rnYdX0dRAg7aw7e/Q+Ls42rb0bwobtkOOmFO4oL7H+BZLceInmr26Acw +nD0aRUuDGun6LbOtb7AlOYBVOsgA6EjuGPG9CMBsD5oYPOzsVkDI+npbJr+hbOPH +V9nM6DdG0vfFEM9DOY2Suj78G4IHQfmeVHU4xslIURuDMs/QCX1Fs+fN18UHnfbW +kq9icc2CxqyEZN/8DY1jQ0zYPdcoDUOXjxiL+6XvKONfVYuG58qpICF4leTsXedA +9XvjeeHJUaNseIo60BxHq0CNVbldhVUahTNN69XJXwAXMnsuvk6+iVVgbb7RsF2k +wOf1bBK158FMfjH8RdnSS2S5qwIDAQABo2YwZDAdBgNVHQ4EFgQUVs/rs9CA+vii +umd3mv2WjT3LuecwHwYDVR0jBBgwFoAUVayIT3aLcCr+VwmLjf1ok4PxwhkwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggIBAG/qsV5JZY907DtCm8bFvmizpVYGkp1Jlm2UE1Vq2jMwOM2LbTcSIAWAMWRg ++j7heZLSGayNCRLdC/A8rqNkNVGmea/HXjUIhTSyhp+4oyxHqjyrq4Fkoz4fKy8X +bE2Joly7w1VHHIT9YDh4svz66Y5HewwpcYhIWK93Bz/fGQ5vaXtYbB/u/my2fOPY +u2p4RCLJZpMGkO80Q9Zj3lHFY2Y2tqFAWNhMGTF0QNxsO5IBbCWlsRm1IohoO8J7 +cb1DdeY3hC1/LDMSJE3DPFBynwNzeElD2KKJbeXfppfNkd+kZCkJKcqmXdAurwFM +1KazTD7GVqEBqfnwxiCo77SliUO3yN+u5HMC9ZwdS1MBxOPJsRBrT0vxuLbdE9NZ +Nf00RdhJa01Kxy5k5iPHg2mMKKird1vnS+/RhjYGI3T5UOYsiMg995EIiPl7DDJY +BCws0HXUoGUnWk5fFOzzKcybuOYH2/bhoXhkLoqh7EcYDQ3WDRby+biUXae0iIcs +qOXkECxGmxe5bSZxevQgJeFhopuTVs0c1TTzl1rrynkNZrq/KXbhbuGeZyqfJMqy ++bq+qYcBCMF5A4MfblxHR9gss3RarBLObRWvweodFk8+HZ+IUItA8pCuVoYuMbdD +IwWMrKkYkn/DXfqa+Rr3IYG2FdFPIQPn5FQwGHqa4eQlC7lN +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0zCCA7ugAwIBAgIUbQwiRA8cH6XqafjVL5xehyX8hQQwDQYJKoZIhvcNAQEL +BQAwcTELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZE +dWJsaW4xGDAWBgNVBAoMD1RlbmFudCBEZWFkQmVlZjERMA8GA1UECwwIZGVhZGJl +ZWYxETAPBgNVBAMMCGRlYWRiZWVmMB4XDTI0MDQxMTEzMzQyMVoXDTQ0MDQwNjEz +MzQyMVowcTELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQH +DAZEdWJsaW4xGDAWBgNVBAoMD1RlbmFudCBEZWFkQmVlZjERMA8GA1UECwwIZGVh +ZGJlZWYxETAPBgNVBAMMCGRlYWRiZWVmMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAptU6x26/ZbO32JpQjjPyXUjG0OPYFV4+9W7hAUjQn7LS6WZ+Dy4i +FeiNqSuVg9jgJJ6ajlkyw6lx3/o14ID+JGFPHj/qblhiTsaoiSwNBjLqQUU7FsX+ +88mIBq7U0Rsh/JF8L8PyuigHUhIAI4Mbe8IAqnKWgO6BwOqobe9bvkzYq+WQkBIi +CQzPI2qxbtDEU5b1SMJaCJmlZ6dNd0oxfLuthCWwa7VZg5jSlqXRHELFwljty0sl +jwvNQJmu580uLPvCkLGF2roS42+l8R+ra8cb/7W1K6AzKAhFL9ESi9Ga6RNGsc+F +iBpOocau6NQBLLEwaxKSU1MPLK7JTUn/74L6uEorLSEbQQhjLZKN6ZnkEA8tCaz3 +cssYz//riDJb8cQpG3DuxvXxQsz+iI2K7oBbMQ9X+HWecIY/y5pD4ZrVtpzGTCMU +pzidRktBedacE25U36x75iTdFFO8JeTC+KXll8/zSvJOvsyvSazlERGeVS1OzT+K +JbDfz+TOBmI8EWxa/aBxfXUVQv4clpYlk3QheNJ6aqk8CWmcjOlHZIbdHFpUih+G +cH8ufEuqp901zoDm6AELKq/V/cIpEY8qXbX2e+1dwr/cAwiPKoLo+k3dB1nJPXeo +5TBBrQ6AkDeptGQ7h/7Dq1YCnv+RY58D2A91Nstaby8RrEkvc9o7yasCAwEAAaNj +MGEwHQYDVR0OBBYEFFWsiE92i3Aq/lcJi439aJOD8cIZMB8GA1UdIwQYMBaAFFWs +iE92i3Aq/lcJi439aJOD8cIZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgGGMA0GCSqGSIb3DQEBCwUAA4ICAQAfVC3rqM911cPqIHMFJDvPuEg/FcskXFCo +X73XS+N6UnmHEqMQF6O5VG+TvZH6YPqwIThlkMb4LFNliUr5H6mxJzXb4LHGD5lE +X4ujRQeXHsfP6kU9Pz/QmTK9rr++7vmd4bGdCIYjfk7boUjnKZEPbXtJQE/zBMI0 +5W09QJulioVULvvODufw2EiF9lILROx61PlwbcsHTOEilYJbfrfiNUjSQhKVI6G3 +PgAJIwe0F7lvURGv+Rl74wThWak6qjIV4HYXB8za7GinPFWL7OPoYupQ1E0YyQA9 +XtY87cFReAzk+4sFz8jxkXT5Naj1DJg83HhVPqzrA6VEanZGj4PD2AfgdGrVhWP3 
+7+DMfJErlMXQmbES0RlUS9Q5Y9+QxgIFE19jwM1xQDSiz3gqd7D/JL0zIZj5lyop +MmWkUcdItRSM0vDH2P5RHPWR7E6vcMaJkoAShpqOOxqoKneB8Ptd0ChzvIcIcwvs +ziRsfOnOYmr/qcH7P4ZiQ3obMhjW7WmhTY0kqJamzGedtGd7SkNGQ15VavGUjJyn +9bZVVLxF6D0yQSL7WYmM1YZCyz7Q4GHpZKGaqQvgdnRSeWXcd5aEEJwGBm9KVm9S +qAf6Jr/Jm03cuOeRdPQjjSMz3egxZ8312IWEF5M3HvZ9nKNYKVk96Nc+Q3uZe2dv +DMAMPXWkuQ== +-----END CERTIFICATE----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef.intermediate.cert.pem b/test_certs/deadbeefCA-gathered/deadbeef.intermediate.cert.pem new file mode 100644 index 00000000..f74dfc8c --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef.intermediate.cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcTELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZEdWJsaW4xGDAWBgNVBAoMD1Rl +bmFudCBEZWFkQmVlZjERMA8GA1UECwwIZGVhZGJlZWYxETAPBgNVBAMMCGRlYWRi +ZWVmMB4XDTI0MDQxMTEzMzQyMloXDTM0MDQwOTEzMzQyMlowbTELMAkGA1UEBhMC +SUUxETAPBgNVBAgMCExlaW5zdGVyMRgwFgYDVQQKDA9UZW5hbnQgRGVhZEJlZWYx +ETAPBgNVBAsMCGRlYWRiZWVmMR4wHAYDVQQDDBVkZWFkYmVlZiBJbnRlcm1lZGlh +dGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDAJpLiyPN2uxEB+7Dk +cQ7i3hIl7gziuCTs2+hm3SkBhFcojmzBi0w6te+1a8/1eRju6zDMsefZafQO8eh9 +OByDYbeH5m5urfyA6D5AGhfqXFb1VSbMq7ClBO6Ic9t6HfGjZKYmMNm6MEufCLjt +JDGBJXraC21YpsWyLCWLg9BWEaSmd/pq4dwQ33CWV3tyzZSxrDE0jW7V1Xxzyx+/ +xN1EsJlUTNtZTQ/ldqfLrtEvH4bBgpvhoyUxq/EH1XXrdq2h/9Qbmt5hNqKQzuPQ +SWUmW3Fdyc6HSf7UiLee1sQa6ojqw/Xgcvixzh8mirc9TwvJh1Hg6nJr57Uoj3pP +R4Itaui9rnYdX0dRAg7aw7e/Q+Ls42rb0bwobtkOOmFO4oL7H+BZLceInmr26Acw +nD0aRUuDGun6LbOtb7AlOYBVOsgA6EjuGPG9CMBsD5oYPOzsVkDI+npbJr+hbOPH +V9nM6DdG0vfFEM9DOY2Suj78G4IHQfmeVHU4xslIURuDMs/QCX1Fs+fN18UHnfbW +kq9icc2CxqyEZN/8DY1jQ0zYPdcoDUOXjxiL+6XvKONfVYuG58qpICF4leTsXedA +9XvjeeHJUaNseIo60BxHq0CNVbldhVUahTNN69XJXwAXMnsuvk6+iVVgbb7RsF2k +wOf1bBK158FMfjH8RdnSS2S5qwIDAQABo2YwZDAdBgNVHQ4EFgQUVs/rs9CA+vii +umd3mv2WjT3LuecwHwYDVR0jBBgwFoAUVayIT3aLcCr+VwmLjf1ok4PxwhkwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggIBAG/qsV5JZY907DtCm8bFvmizpVYGkp1Jlm2UE1Vq2jMwOM2LbTcSIAWAMWRg ++j7heZLSGayNCRLdC/A8rqNkNVGmea/HXjUIhTSyhp+4oyxHqjyrq4Fkoz4fKy8X +bE2Joly7w1VHHIT9YDh4svz66Y5HewwpcYhIWK93Bz/fGQ5vaXtYbB/u/my2fOPY +u2p4RCLJZpMGkO80Q9Zj3lHFY2Y2tqFAWNhMGTF0QNxsO5IBbCWlsRm1IohoO8J7 +cb1DdeY3hC1/LDMSJE3DPFBynwNzeElD2KKJbeXfppfNkd+kZCkJKcqmXdAurwFM +1KazTD7GVqEBqfnwxiCo77SliUO3yN+u5HMC9ZwdS1MBxOPJsRBrT0vxuLbdE9NZ +Nf00RdhJa01Kxy5k5iPHg2mMKKird1vnS+/RhjYGI3T5UOYsiMg995EIiPl7DDJY +BCws0HXUoGUnWk5fFOzzKcybuOYH2/bhoXhkLoqh7EcYDQ3WDRby+biUXae0iIcs +qOXkECxGmxe5bSZxevQgJeFhopuTVs0c1TTzl1rrynkNZrq/KXbhbuGeZyqfJMqy ++bq+qYcBCMF5A4MfblxHR9gss3RarBLObRWvweodFk8+HZ+IUItA8pCuVoYuMbdD +IwWMrKkYkn/DXfqa+Rr3IYG2FdFPIQPn5FQwGHqa4eQlC7lN +-----END CERTIFICATE----- diff --git a/test_certs/deadbeefCA-gathered/deadbeef.intermediate.key.pem b/test_certs/deadbeefCA-gathered/deadbeef.intermediate.key.pem new file mode 100644 index 00000000..5af1db58 --- /dev/null +++ b/test_certs/deadbeefCA-gathered/deadbeef.intermediate.key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDAJpLiyPN2uxEB ++7DkcQ7i3hIl7gziuCTs2+hm3SkBhFcojmzBi0w6te+1a8/1eRju6zDMsefZafQO +8eh9OByDYbeH5m5urfyA6D5AGhfqXFb1VSbMq7ClBO6Ic9t6HfGjZKYmMNm6MEuf +CLjtJDGBJXraC21YpsWyLCWLg9BWEaSmd/pq4dwQ33CWV3tyzZSxrDE0jW7V1Xxz +yx+/xN1EsJlUTNtZTQ/ldqfLrtEvH4bBgpvhoyUxq/EH1XXrdq2h/9Qbmt5hNqKQ +zuPQSWUmW3Fdyc6HSf7UiLee1sQa6ojqw/Xgcvixzh8mirc9TwvJh1Hg6nJr57Uo +j3pPR4Itaui9rnYdX0dRAg7aw7e/Q+Ls42rb0bwobtkOOmFO4oL7H+BZLceInmr2 
+6AcwnD0aRUuDGun6LbOtb7AlOYBVOsgA6EjuGPG9CMBsD5oYPOzsVkDI+npbJr+h +bOPHV9nM6DdG0vfFEM9DOY2Suj78G4IHQfmeVHU4xslIURuDMs/QCX1Fs+fN18UH +nfbWkq9icc2CxqyEZN/8DY1jQ0zYPdcoDUOXjxiL+6XvKONfVYuG58qpICF4leTs +XedA9XvjeeHJUaNseIo60BxHq0CNVbldhVUahTNN69XJXwAXMnsuvk6+iVVgbb7R +sF2kwOf1bBK158FMfjH8RdnSS2S5qwIDAQABAoICAAX8TaAN5XOh4SZAKmL7WJzd +kRAoabNYTCIquYMWyZO4BSUIxr+CsNW1XCm5GYnKg2nR3rvJoPDHWahrhbP83vqC +vT5ZAcCl39M+KM5DYdJVnQnYZLSEdqS1Uh7jCQe1ThUqafiPlgC0R27FTUlp0rb0 +rW6EY5wa2dayhFrjuMYtl4TuANl6fqnx4+ZzBC/v0n10YTYDX9v+YfmTbsJBBimB +HM2g9UZDXcbmmtR/9hkzMLXvdR6iv+Jbuk7lqd4zXLZKq1dOzDWYB5leMH+JAcMf +nxjuU+Yn+m2xcpLamtJB0xEJGKwsuK2FP7gqIwogX7wf4H+SngzZc223hnZYwmfM +686EWMwcX2sxlCT0Yv0nPLNeXYbri+CLdu4XxX5ApgT+AlRbB+cdSt9NwUaCqcW/ +Tqa4WTr7Zkt1K1VtaPlSX1TNqF6tj7YQIb/WJ8qyhcOXT7aA7tmhnt54ajvj97AN +LDjiRwssCLryIym09czgumH/87xPSRuY7uDi3l+gZBxMRhfNRPm6VeAVJocyEFMH +1Bc3bU49rC++D2w++vQdchouadCq3O8fBq1iwpnkr5t2NAWs58MgOdmgrRpCWlS7 +e/dE7ZEaqDr4cKR02ln/sxiAU+aCAhtC09YcTMTgY64NRXQDPPa6Q/tBSHhH4GVS +y0XgkqntA42/fSjMXU5lAoIBAQDwDfLsMW22iEWgxKEgot4yYSBgHiJF1DUVbNhS +VYGvLP6M48F+LL/vMFzJRZSVdURlB7P/Fr/uTfXPe/CNzYtH/gw4UjCCHwJUFoI1 +fiJZ6lCug1YrLTmaPHtrcPgx3MnonW0BfUh0AOPuLtL7exEoCvXO1drsXW5FXNDJ +/A7McwZAAuzY0Hmxcu81fPc+V6+4xE9Bdo4Mva2zQKcoZNBkH9v0kqUmoYonjm19 +SUf0WirxGHzLsldfjWBa6NlS8FBBb91aAHlnLrzfoTTLwpwAg9YHKCfxJyIk4qlJ +vPEONF5qkhkp3fU6EIljL94I4LCtqX6NTORB8B9w07135D/fAoIBAQDM6gkgBs68 +jjGQZa7hWUddZCEF2iNPfVnRvscnnLIo1L8QF/I9pe0WjvuZ4x7C96+eobqLHz1t +0pSk7rkfhAyN0wZuvDNIpsMAbXnCFFRlvhBUH8igW6tNX9Znr1aNjqi6uSFrqhTm +DQLUeqtqTxExoTxrVePBszyMxnZuUv5uC3GlkKiQQaafYSdDuHccud94gONTfNqS +Q9xVw8MDnrk9ERD4Jp9vWVo9M1seR6+DnU7yLwWnVHqPUJsOYEk7RMb4Z4tslziC +nM7s1H7XXXuz/4Uuv472CB/fY92q8J1wIqTR8wh53Z9WtAxT/BxSfx+6QsKgjAt7 +NaqKzL0Lao+1AoIBAQCK4rciWjKswcjvC/RhKnm90h3mHmkb13vi6nd5o0kFrdWU +dA01z/Q4EdYNCPm/QvqQLVXBh0sCOA64vW9ey4YC4AP37e1syz4QonkxM8nyFbQa +U5heIKtMBiQt9E1Uo/YZZEN18iMiIoW8/1BH+VrivPycb3Ra/m9XKC0QP4ncVcxO +6qJXir5toR6u5p8xbZAYKVPlfw8OZguCQyyzHSjIVPpwVQvB/wIsgqXsL6K2yBhC +JzXEsGkGuof98OwTpT/5rBv3/4A953eVNvKR1QVm8t+4gk4hos+JYa4qIGujGT9r +Dx4J8AX6Uyj5vu8KWd0+jGrQm6UQVToLkCX1oJQnAoIBACHotJvdJIblTORlPzxB +RuL4d9J9F4zG5H8l/9TZRE4nbnqeS9LRy1sJTC9Z+zQuQpyQjMsK32J2D2z/qBjE +BEPP4OegMyhZmDlQ5G4uaQzxF8ij/dIzeTRzoIpF5J/GNLu5eQNVAOJU0twQdJ9B ++ElrMhFdBDSAkYuGKAD3sOzK7gKE3rAIfScwr4uwUd2qEIXjZjrhlVXQ7vwuSe4m +bObjFRjLY2TMt3cbMX/gQvYXqQHAsEHRY/qlTxNg8Z1iMz7V+6d7mrlac4+TVm4T +JS5TIVnsDx89lvCjmQH5kUQGJ5oqioOIHh796nBXbuRj7v8mnwhsRFYeOlt/p+Tk +vGUCggEBANpuCrMqgzfxUgCiLfgmxF4WSjV86xF2wcY4HGFTE2J7RTybZKzE6w2q +Tel4bcrHRizww/pp81ihxDzT+jChdqopvqO+yCalHqPD2IKwV3wb/tQaU/ZdwhKD +s1pv33z4B0/N6862OkH/WpIQXFmHdbKvd+IgbDyF3Re+NEcfJGbK9P22w+i15cgo +9PPtdNUzCnulqKuC1C+LsRTzGwEORm2kF80Zq7d99q30oJNFci/JtjFfR0cYx2s2 +eB3zTvVA96s+HoJYuC2dwiIdHpc14t3NOrvLagwat/HyM/Nte0jSbG4jplE7zFf+ +ViXG1d6ba68+JI5FBVzYeOzAxVrwYRc= +-----END PRIVATE KEY----- diff --git a/test_certs/demo/backend.cert.pem b/test_certs/demo/backend.cert.pem new file mode 100644 index 00000000..ac0e0244 --- /dev/null +++ b/test_certs/demo/backend.cert.pem @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBxzCCAXmgAwIBAgIUKnio9q6H+IJijQf1gnEL01XQ+lMwBQYDK2VwMEsxCzAJ +BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEaMBgG +A1UEAwwRQ0RFVCBUZXN0IEJhY2tlbmQwHhcNMjQwNzAzMTMzNDQyWhcNMjcwMzMw +MTMzNDQyWjBLMQswCQYDVQQGEwJJRTERMA8GA1UECAwITGVpbnN0ZXIxDTALBgNV +BAsMBENERVQxGjAYBgNVBAMMEUNERVQgVGVzdCBCYWNrZW5kMCowBQYDK2VwAyEA +P+79mMTuxSPtABHMnovmrY/AVbYIUGOpo47N/GUa2a2jbzBtMB0GA1UdDgQWBBQV +N3P97CCbKBRwUXhGrUkoDC1TrDAfBgNVHSMEGDAWgBQVN3P97CCbKBRwUXhGrUko 
+DC1TrDAgBgNVHREEGTAXgg9iYWNrZW5kLmV4YW1wbGWHBH8AAAEwCQYDVR0TBAIw +ADAFBgMrZXADQQCOw2Wt5G2s2zMSAZGANcG8//MuRgLy1jBxxM51jsn0dXTZqkOb +K3pmzOjCj2reNhm9mXfYHQCj36jeOAelmWcH +-----END CERTIFICATE----- diff --git a/test_certs/demo/backend.key.pem b/test_certs/demo/backend.key.pem new file mode 100644 index 00000000..6176e4a4 --- /dev/null +++ b/test_certs/demo/backend.key.pem @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIM34ssgtYyj55NsZ5H2Gk3qdUUs6JzqwYxw9jSOiO+Lm +-----END PRIVATE KEY----- diff --git a/test_certs/demo/cnpp1.cert.pem b/test_certs/demo/cnpp1.cert.pem new file mode 100644 index 00000000..6b5949de --- /dev/null +++ b/test_certs/demo/cnpp1.cert.pem @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBxzCCAXmgAwIBAgIUeBx9bqEurioHO4fJD5z8nMIAOZwwBQYDK2VwMEwxCzAJ +BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEbMBkG +A1UEAwwSQ0RFVCBUZXN0IExpc3RlbmVyMB4XDTI0MDYyODEzMTAwOVoXDTI3MDMy +NTEzMTAwOVowTDELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ0wCwYD +VQQLDARDREVUMRswGQYDVQQDDBJDREVUIFRlc3QgTGlzdGVuZXIwKjAFBgMrZXAD +IQAY7TzlgF0tBNzc5eCWdmKEfwGwIbFMmWMCmwYpLUweSaNtMGswHQYDVR0OBBYE +FNAmLiDRcs964PIgpp0i/gFXq8OrMB8GA1UdIwQYMBaAFNAmLiDRcs964PIgpp0i +/gFXq8OrMA8GA1UdEwEB/wQFMAMBAf8wGAYDVR0RBBEwD4INY25wcDEuZXhhbXBs +ZTAFBgMrZXADQQCzYs3yqjDLSM/I7C6jy2jyeBtbMWqsFHWDe7MWJdlW8QSBwfhp +vehKrvWcb9brXnYa2A5m0ejbT4TVoWGlylMO +-----END CERTIFICATE----- diff --git a/test_certs/demo/cnpp2.cert.pem b/test_certs/demo/cnpp2.cert.pem new file mode 100644 index 00000000..523e12d3 --- /dev/null +++ b/test_certs/demo/cnpp2.cert.pem @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBxzCCAXmgAwIBAgIUK1fwYdxUatMwHY28/HicU0OdbSwwBQYDK2VwMEwxCzAJ +BgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjENMAsGA1UECwwEQ0RFVDEbMBkG +A1UEAwwSQ0RFVCBUZXN0IExpc3RlbmVyMB4XDTI0MDcwMTEyMTQ0MFoXDTI3MDMy +ODEyMTQ0MFowTDELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ0wCwYD +VQQLDARDREVUMRswGQYDVQQDDBJDREVUIFRlc3QgTGlzdGVuZXIwKjAFBgMrZXAD +IQAY7TzlgF0tBNzc5eCWdmKEfwGwIbFMmWMCmwYpLUweSaNtMGswHQYDVR0OBBYE +FNAmLiDRcs964PIgpp0i/gFXq8OrMB8GA1UdIwQYMBaAFNAmLiDRcs964PIgpp0i +/gFXq8OrMA8GA1UdEwEB/wQFMAMBAf8wGAYDVR0RBBEwD4INY25wcDIuZXhhbXBs +ZTAFBgMrZXADQQB3lqqrqHme+KHmhOe3QMLcPQGnaffxQQ+lrbn13cGaFnKYVBO9 +s3iidmpvZwaAOMeibNd4Ew1zEQtlwOAvdLkH +-----END CERTIFICATE----- diff --git a/test_certs/demo/listener.key.pem b/test_certs/demo/listener.key.pem new file mode 100644 index 00000000..58ffea3e --- /dev/null +++ b/test_certs/demo/listener.key.pem @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIH9AJcray3rNikzp7oOobt9JsDSHNQyxcT/gHP0kvnAS +-----END PRIVATE KEY----- diff --git a/test_certs/ng3CA-gathered/ng3-proxy-athlone.cert.pem b/test_certs/ng3CA-gathered/ng3-proxy-athlone.cert.pem new file mode 100644 index 00000000..4e583663 --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3-proxy-athlone.cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFJDCCAwygAwIBAgICIAEwDQYJKoZIhvcNAQELBQAwXjELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMRMwEQYDVQQKDApIdWF3ZWkgSVJDMQwwCgYDVQQL +DANuZzMxGTAXBgNVBAMMEG5nMyBJbnRlcm1lZGlhdGUwHhcNMjQwNDExMTMzMzUy +WhcNMjUwNDIxMTMzMzUyWjAcMRowGAYDVQQDDBFuZzMtcHJveHktYXRobG9uZTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdgZ0LiLST+/k8bwCKwxXPK +S1YI5rt4bpD/pZPvOhfrJQPWs+04jPzRAPIbc1hR1ZhfFoDCOcIVZvt0IQAaCOhS +QoRh0uyEYgU18FnhqjTIP9u4Q3IaMqTloYqRseUQVuqfOrEbv7yOvaJp3kiUYlB9 +AoiJSfiQCowPYEFm9A+CDtNbP60/oG5l2nGbyDWGgGm6ZvQCHhSLnn415PrVx5TT +xKlOvaLmkxSqe0/Zh/fA1Z7zpMQXtN7901buGmMs65DycRgzL43gM7u84uz2L+gb +vjeFIWRO6vCn1Ja2l7421Vn5sFe6vru8FwsyCxrJLvuoutdPKw7Vzq+HtcOpxHsC 
+AwEAAaOCASwwggEoMAkGA1UdEwQCMAAwHQYDVR0OBBYEFIpn0eocIO5VRXf1E9Dm ++C9+K/3iMIGNBgNVHSMEgYUwgYKAFLW7/0ewDoxzO+JL0b7+2NzhDMHxoWakZDBi +MQswCQYDVQQGEwJJRTERMA8GA1UECAwITGVpbnN0ZXIxDzANBgNVBAcMBkR1Ymxp +bjETMBEGA1UECgwKSHVhd2VpIElSQzEMMAoGA1UECwwDbmczMQwwCgYDVQQDDANu +ZzOCAhAAMA4GA1UdDwEB/wQEAwIF4DAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB +BQUHAwIwPQYDVR0RBDYwNIIYYXRobG9uZV8xLmlyYy5odWF3ZWkuY29tghhhdGhs +b25lXzIuaXJjLmh1YXdlaS5jb20wDQYJKoZIhvcNAQELBQADggIBABHAtoeyho8A +x6RHOjQWOpkLjxmkhFTtBhUs82x6ur7uoQc1qi7+slQbh3enzGTX57n1aKRTQvx1 +2SQ3Hi+LJXWTpyfuGbqYg1PPe6rOAkZlh4XOyBfkDm31oAhSjzu3wsSgfCfMwZRr +d15ETTABcQrgbm4eAMCOXuIKfSUwrPZ6rjeJpxJ411fRDVqQG56ZRpK9Il1LMu5G +r5DkuYxuL+ra52Gqd4icWCugWFq7xI0iwIahwgFUYCh7eVbLGruhgSZXHpG9v+rw +Y82CJeYL6LYk7pZ3a/pIqe6InIKjrnqEcm+HNyZPtxW7s6/KWaer04WA3n8uHWx7 +zz6GKFgfetdTDqIb8dyxvzYVL5ef6ZyJtfGqi7+8IClmli2Sp/Xmzj2yawP17L8R +/laXHuGWmp1cmJhfd1Xb4KOJQdPYuJQFL04nHDVNS/Ugy0QzpaLgsiTC8CmQElGM +cDAQW3wg+NJpmOCzb1Xsnejik7taz040IWrVPMuZDYL2NUigsD+FyOMBFe4bnKcg +XRTpUCGLmKJc+vVFLhbc3yDq4aBBsThnmhHaZpBG/zG8lSGCeG9n+TJOY/M7gprQ +JYFiJL2FftNg/eGB6rnnRxwGu10LCIeICf8IUOYQNcwkVuxhhFiShshZgvYgBm2U +rWvKMqk+7r+vI1o3VgRzVIK2y8nkYgwl +-----END CERTIFICATE----- diff --git a/test_certs/ng3CA-gathered/ng3-proxy-athlone.key.pem b/test_certs/ng3CA-gathered/ng3-proxy-athlone.key.pem new file mode 100644 index 00000000..601c5fa7 --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3-proxy-athlone.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDHYGdC4i0k/v5P +G8AisMVzyktWCOa7eG6Q/6WT7zoX6yUD1rPtOIz80QDyG3NYUdWYXxaAwjnCFWb7 +dCEAGgjoUkKEYdLshGIFNfBZ4ao0yD/buENyGjKk5aGKkbHlEFbqnzqxG7+8jr2i +ad5IlGJQfQKIiUn4kAqMD2BBZvQPgg7TWz+tP6BuZdpxm8g1hoBpumb0Ah4Ui55+ +NeT61ceU08SpTr2i5pMUqntP2Yf3wNWe86TEF7Te/dNW7hpjLOuQ8nEYMy+N4DO7 +vOLs9i/oG743hSFkTurwp9SWtpe+NtVZ+bBXur67vBcLMgsayS77qLrXTysO1c6v +h7XDqcR7AgMBAAECggEADD6ueNwdO61xfvhMV5UVu2PMNq97RD5ERCF/UNObdMab +Ul4IVORpPEm1zijDb8Qu4xnL7wJ+u53G4mCiejgfHe2Xajv70UzcHEfQyCRk6LL3 +wQ3TDpnvTFNSFYmXLGNRgz1eRGxY3ULDAMuRvFForFrRIgOUiHCIRUvYdIvXJ3CQ +qc3xrNlz8I5JQ0Y5y/ydBRbQahzsV9w9JBSxKdeaZPQgrcQiR2qClvLNO2dbl1iO +qdXJTQkq3RSXWUlnFIewOy4m0H0d9A+zJ8kBiS0HjBTgFSJFV8X0ycFCCDR+irWj +XFFqzJUCH7z5b6co4cQskto0czim0Vu9OaT1K1XF4QKBgQDfFzXYuzmvzWf1wsVQ +UjwnIr+mPnvEp9BA3n4jNx5IyuOquzXbDeeeIqFhpSRkiehyr5ziGJQcWecxqexs +2LY07CazLe2B4BASde7LGbJYrGZGC7rd/seJjPUtf9HZiHn7xjBspEcymJnQ4/jT +51cSc00GqfOdNuBTLJzHwmgasQKBgQDkyaehJ6HHioaWKW6qHsvXzTD5QZ6dz6kM +UdC8tLiwTLwpVCCkR7t5ZneAsJXIXoHzOVf10VPmexLqUU69ovckXcqIo507Hpou +aFS4F4nB61ogNU7YGK2R33ciYpxQ3UFUVTcpfy6vQw5a+ZUmmvyt/WafzBpb6j2y +oV8krzaE6wKBgQCzTDQ6vD7gzHWBT8jFhVJTIdpmAlH2++ikOi2mn+X3GxlfXpcq +N0KwFaUelVtqcmwfwISWMqrXHmpxv+AEmyfV4PY5i/AXMb25GI+TLFPqcKTCJytI +wDwekOt8Zf6Clu1zLRY+Osn+ICd/e7nAlW2lpoWBlZX2BOTF9FUNN3H0AQKBgQDh +JKTmjSbCQ8rwCLPTi9zTcCSKlY2vBQT5j6XXngzFya29CgHZSJHwBOsC5ENx27cM +EFlEGYv0GxdhKURQkAefbQD7d+8pl5Loqlj8lIVHJQEbAvqk45wjGgKV/u6PETzR +7c4L8xGQ/W8qDcQ+KnUQD3cnN+aUny26FN2ZLyd6owKBgGqGFqkw+oV7s+Swibe+ +GI9O4DYt7r4P+mdGlG6nK0Y9VholoRbNf0XBy/3mJx9av1WRcotRVTWb3Xc8smop +4kMT98RLoJXNAyJcQuEUJUCYI9R0Y6c/JwYMjv0rzc9W3evsZppzSScCrYZSkHs4 +eXRHuC9rVIhZOb/Ux3xdXFbK +-----END PRIVATE KEY----- diff --git a/test_certs/ng3CA-gathered/ng3-proxy-dublin.cert.pem b/test_certs/ng3CA-gathered/ng3-proxy-dublin.cert.pem new file mode 100644 index 00000000..4e7fe0bb --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3-proxy-dublin.cert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFITCCAwmgAwIBAgICIAAwDQYJKoZIhvcNAQELBQAwXjELMAkGA1UEBhMCSUUx 
+ETAPBgNVBAgMCExlaW5zdGVyMRMwEQYDVQQKDApIdWF3ZWkgSVJDMQwwCgYDVQQL +DANuZzMxGTAXBgNVBAMMEG5nMyBJbnRlcm1lZGlhdGUwHhcNMjQwNDExMTMzMzUy +WhcNMjUwNDIxMTMzMzUyWjAbMRkwFwYDVQQDDBBuZzMtcHJveHktZHVibGluMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwubsPVdU6HRCJVYqa5NZ+Wo7 +/7XGhEgOIVkwbSPJFHS3yhPFzpp/EBnp4wymaoY9Y53a+KZn5W6s4xGsG1bxrIGb +32ucOIFTZowF64YKWUyAUSO1QfrV2bhq/jHVPY6Imx/AyUVOTwmiKSEraEVEkegi +jA9PPFGU5AxEGnk79B8SPBaMp6So8ON3fGUssoqvJx+No8ysuiIuJle+0ct38XT6 +E1pcU2ZfVsUGEODyke0ckDJedZDx0zHnk/t2wPMTFIEuUmlb1LR6oEpIj951n6d5 +MiRgYM9SUKAT87GildMe5IWPBcqt81PSsQd1M/3J8y2X+s+RYHwHQ8/oOXeRWQID +AQABo4IBKjCCASYwCQYDVR0TBAIwADAdBgNVHQ4EFgQUk7iKIZbG/pR4aoxWZpok +GovuX/kwgY0GA1UdIwSBhTCBgoAUtbv/R7AOjHM74kvRvv7Y3OEMwfGhZqRkMGIx +CzAJBgNVBAYTAklFMREwDwYDVQQIDAhMZWluc3RlcjEPMA0GA1UEBwwGRHVibGlu +MRMwEQYDVQQKDApIdWF3ZWkgSVJDMQwwCgYDVQQLDANuZzMxDDAKBgNVBAMMA25n +M4ICEAAwDgYDVR0PAQH/BAQDAgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF +BQcDAjA7BgNVHREENDAyghdkdWJsaW5fMS5pcmMuaHVhd2VpLmNvbYIXZHVibGlu +XzIuaXJjLmh1YXdlaS5jb20wDQYJKoZIhvcNAQELBQADggIBADCaJVgfUfAzh7vw +bbwAIxn8tV2Ju277xHT1vE/VSQp+0vH3ezg8nfAqe/asvAjzk9UPT4OJv+6C1mnK +SF748XhViIp9vvCrJdML7gySZRoNN2v/E0ZUrTg7PL9phnS8wAzpno+fhhgDcTGQ +8dFAr8BG0i+bvBs5feB2DfVp0qfm11JzKXVfFhemdVV56fg0WpCCtcy7xsxFYKTz +t9cAjfu8ZOY5v+y6IRc3A8jTTtHVbSPjzLpe4o76o6ZdZwAgdVoCGkjuANX5IR/+ +x0GZ/COiBdx1+2brp4BposoSiBxs1p5a2/uIh/+WogYbu4yVgaPJWuVV5E97WKg0 +oO1CT9R4H46lDhEtoZZ8no+Ijfl06+NDpWDcJsg8Px+2AJsWpyNoOhCbZPMGN8vT +0E/6Qgt+IXY+bx++fJC9h15hG92Lk5xc8Pa5ElXX1YFsgd5EngY6XWDZKhTeKyUV +rl+1SumllpH5xqdug0WMr1c6dwobquMJ5QlZ5cs6oUub5Ql8LbkByXuGx0K3EEIN +RPboUztvr61JsyWjN9XsDhgDU75sYKagK6s0pC9YfizBBmwa2kt1oz3PB4tbYiXs +o0M5Mh13Y0/IxDeVY5LIT7syLLt3UUHNEW30ETP/cafiZ1ElWBA6mHoqjpbch6Xz +rqOtPLbbIlGAS/ATqrPrWsgFx533 +-----END CERTIFICATE----- diff --git a/test_certs/ng3CA-gathered/ng3-proxy-dublin.key.pem b/test_certs/ng3CA-gathered/ng3-proxy-dublin.key.pem new file mode 100644 index 00000000..9622db5d --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3-proxy-dublin.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDC5uw9V1TodEIl +Viprk1n5ajv/tcaESA4hWTBtI8kUdLfKE8XOmn8QGenjDKZqhj1jndr4pmflbqzj +EawbVvGsgZvfa5w4gVNmjAXrhgpZTIBRI7VB+tXZuGr+MdU9joibH8DJRU5PCaIp +IStoRUSR6CKMD088UZTkDEQaeTv0HxI8FoynpKjw43d8ZSyyiq8nH42jzKy6Ii4m +V77Ry3fxdPoTWlxTZl9WxQYQ4PKR7RyQMl51kPHTMeeT+3bA8xMUgS5SaVvUtHqg +SkiP3nWfp3kyJGBgz1JQoBPzsaKV0x7khY8Fyq3zU9KxB3Uz/cnzLZf6z5FgfAdD +z+g5d5FZAgMBAAECggEADSoAudHsu7iigRgVTGfK83Qv8kEKVAPz5F+8TBrXKYkK +otz8BszbmTBO4lWH5fkEK3bNg+7lZJM5nSqj58LTFvY4G3qOtihSBEKf4b0PdOGc +Zx6UzwyMpiR1IY8nGn3Gj0exQ0bpl6AMcl+5qVTIYVx+/NvJ28ac2lxTu0AGAyJ5 +8aw3Fw9qsUKd+ChZKW6wF/RIH2Llw4f+hEtHv/rfkCooMwnoMgEB4jgKVrNBh6e/ +k3Gg95RKZnfzk1YsURZ+pc0Tc2MofXzlib9CWpjcLMG6tW4rKxHV6KxvshYFQhDf +4xQayq4IY1BuH3KiOv5GOi0we2a8C/OdpnSVCcPVYwKBgQDfVB8Uh3Xq8rHwP/3n +g7mm4CQaLItv7lf5xmKpF+ELSNASvjs9I7wDO1gCqSlv83N5kqmuVbgJM+NjuvWh +WLDkTcyaHaSVNlw+kUDTmI8XuY7U5+Whsq/k+D1nPvZlhSeQD0xjA72tsKCeOqnw +1+GYhvCcV9HphDd0jVp8Z30d/wKBgQDfajLR9EH1CkqsLPjvMnf/BASryTQojvaH +/EOexs4y/x8VtCLH3N12UlBwslELfLaUbyLRxX8YLORFt8h2ocjr0Ro8VIq31Wig +/7GhgZPa12IXoTRx3Q3vCU6dLSg8Gxn2/ma5KcN9n568SplzfnaEKA6tlcrStEum +3RP42ycApwKBgQDG2e46p0r1glex5ii8Jev+jIFY1OTrKzwmaOCcU2jMVW7/4WZ5 +0AGulv6Mj/cXeZwOWerAEg8mEo+b03K20XPgNO3DJDXRrm+APnGMdsCijVSCemFb +EvHPXMG/1q4qQt+xRA3t8/zGVQkPGokKrA5xc2V2sDN2S4XdP6ecMcGUHQKBgGvq +ODIB8foSxo70CipADoY3wqnXvkH5DEwTJfaDwQYRBUrOgB5aKP8GM6S7/Tm3O9nm +9MKuyv06cejbJRFohEyj7DBIBdlWU/uwhnSkym5iqxcO3u5BAIH91ONG9LBTxu7v 
+Y179NVhwyZPZ1NVcuJKNZsmiGR49yG0JTKRf9y7xAoGBAMB88aXGLXA6evWDSpEa +lKk26x3JTTIa1D2zCRVhfTTVnRA6rM0Ze5IDWf5fyL8+OGkv7pHrEAImc/T3exYs +4HdSP7hi+4uoWis/bgTCQIVOaHhodZkHWtKN1fxXOD2wuqTHAnuJSZB0wsW+UAHQ +Bq2WUWr3pHn+3b8NF2/Pzpl3 +-----END PRIVATE KEY----- diff --git a/test_certs/ng3CA-gathered/ng3.ca.key.pem b/test_certs/ng3CA-gathered/ng3.ca.key.pem new file mode 100644 index 00000000..7d90ba23 --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3.ca.key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDl6ve34yGBRlwO +SrfzLm6eKhwgkEN7VlsTunVtPr0H1w7u3qIXKXXKdwe3ELtRmjGHdesTFU5ryRRE +pcPe3Jh4Z6mFuN4dlUIbtiV/NtaPj6VQ6sWLRf9rwtwVAhaJxHCfezgdWLdM2InQ +K+XTJ6QnWfCnoZH9DEI6Tw08hyWpBMzugAuor5o5pigfuqOaU09uuPwAKsrBZ/5t +3gMoEfbd+jh81dFxdbLarb2TFI1s+tT2Jz5vDcfbk4x/Gm5X3/9kohcqNNGqDC4w +A2CKQIA2yQJBK0oa19250cjNHpTsmpJd0H8/K5m7MBjYG2GxkcGSgOuPNEbvm1zE +kg1xkgVgJVEFN/XWHm3MHZ8XphJ9hVFggZnjkTeE/WZsEAU7rbs9j5lWlhh4oekh +FNPkedpoKfbAs6LzI4Oy45jHEW/y0yijBgpoVKfG/DVAkF/xvApqzBqNSpeTPg2V +tdyZeZ1oA8P9jd+j3o5JCop4w0TaRyTX104/SDmh0IZMtYPJmvblAIi8+wacmk2I +FSCpVbLC5GYkwftgf6CuL+ph6VSwMw6zeqWYyvu/RiWXuy/GkWRrVPeGyP8txpiU +K22UQW8GAx/TOU6Gpq8G6dT6oZyfhDqo76CNzA6uprEa10dRbtRkJ4Pj/3o2PnwK +U2QEXNPLeGZkU817omMF03x9eAw7EwIDAQABAoICAE/Y0j7ienhS3xV3jXq+3VvA +dBQ7BbSHwQmQ78cIys02OtFqMBCrLWUrAVrqTKOCUUEiComOFLYKFyfgCa4UHj9y +gMUX9st/Un4dO9h6SvmAgYEgh4pevN3HxnblSr59oj9Ao7RNYxsk+Hd7R02VrsSb +z5/n1LN01ms5mPc0WDfimukZPvNOSNlMgxLB1p9531QsmhfUxKV2YD33pFD+LCXb +CFMSTrKAlaSsQfIaaSM32U3DQ0N+HbJ8A4cAvuF1wVRJlpHp1gagREoYYiahT+W2 +2TmYtxurbC1kVcAmpsGkzMAX8JkXo0QG6BnlKIBqhjdC3ovVM+1ynz0FG/YdKBAb +UnzmuihB+9aVi2vlOaeg0dYRL7xFI9yYTzX0WT55Dmd/pCS85YYw4hWw2zwZPYJ/ +4zIoICBAdUPsFiKBD6RCjCxerSPY8P0LeENQRdlQvB00AFj1rWNHwhTM0BA+46b0 +0a51l9fzRQV1lYCD4fAbW9rm1Yk+soKsGYsoaQ9EfWYcRJ6wFhkEbgteAs4JTU50 +T31ul6LrzLTUpYP3htLp25v+K5wB8BsFV9SqdeonPlCIpDKN37uVUgMTilqjWBYR +nRLU0GL97oD02e1/9Th1J8U05udtEOnwFXut6zPsKfG8F9N0xbOGkgHL52ArsNIk +2sSPEG1p7rkG86zE3mTBAoIBAQDsC+CeN1eeZafVOKPhY1f+9+POHThDouVY/ij7 +vPPT1Y/P3L9/ttCHGAPanoQwvgINrJ6LxspDBH9iZdOnC2eFVkx3Zh7BGN2PJRql +tgZsWIS8zBi8ggnM7MTGlY4AXsNsG+aM6aZZUuamEzJslrv0SJROPlqnwSvKYXJM +pf9VfJ1kVw1todzJ0rwasLEbBkJtt98Jy74oDMr7LFExXhEdMoCg1smfhckKnzlp +GUwpd9fq5cuy/5FLHkYXxw2pKljodvbbxCp/QtiyVuKAUgKUJeeSt0WNkz5dzCur +6t6ZTn508znoR6szaCw8dfwZjP4/xbMUoc5CGEjs4p2kWb0HAoIBAQD5Wnde5/zJ +84eEk21fTTDsu6ujUJs7OQCBoZmxOICDo6AX9+3opRvXDpaUoWOnkwsq5IUwYfWU +lGNFKFlI5Si/pP/INqkRv8uAcIhw/pMk+3AAZZ1t45X7R+hAqCs/ysUSHZHtizV0 +WW4mnVvULZQkHW4/Lnx4NMdiPREY0xX5Y9hIes7nO29Xdr/3CeJROMmC2fv0mOWF +M/WMFsf0wpWKIlGx3DEwLv/6tJjIkM1m1s12+Tg6amdtBIN2wALi7Ewsh+CSxhxJ +Dya14qsM1bigLJL4LLhoUffJfFUUycGqcgjnGOcjD+WEfdzacszuDVPFZRus5KF4 +z4THJPH/TZqVAoIBAQC+X3XxJMFNxr7tgJjrlqvJvzPDcF2Ib0B53cJABczA9GTg +SDFKV68mxX+WOnJyvUv6q1XTdah7FI2IVoe1GcWMDGXvhjUXl25ZnJs5Ou1DRMEJ +Y7IvltFArVSmtMeqFMOX1YjD7U10jR/ooWLxoUBFbKPv1upWkb03pBADMX4U/EQU +QQFbgkgHniaHgCnWmd3Jj5oa1G+w7WBn4v44yp0S50BGd/R06/mQz4oF8ZkzeKxz +jpIS83vDEc+GLcweu0CpfAjshfGgYdFTRC+5PmtGAKwb8CPwYujRzXELXonZKy1i +NShm5iZ1d3b17FI+aTBFTrV31mf7doqaQsrRWm7RAoIBADQxoYb2HE3Rce5oRZ6L +M9B5wAWhW7zbw6VexCL6ZrwnRd1+gggOd3NelFpsJcXv24iQ6OgluRfur5DMT1QY +05y51uh7uIwEj35FD+pG/1ngX6NVocxctQoyFQVjrhneQb32/pKwbvH5vjrK+WvC +Vp3woyEKg/dNZObFTYzasmDPa3hQbHquUDIIo0n8gajqsBN5uWmLj4+APseiBRzQ +liih4mPOaWVikSZTIx/oX3ZFRCDBLencV+XT6gizTGdfetpfXryaR1VrnHi6B3/a +RJkSe3kDM15yueEs60o8qRMaZ46oAcmvDCXdipsXKJozo/NPpKld+galuW9N5nHA +NrkCggEALB+irKF8kwYOIGoESalMRwIyQL0rXuSBVx+VCxuouVPKh/LsJaC9x0H0 ++X/4KGDtjbiwYkQFDTeSCquKnSqS/yJYANapGFD+HoUeY4J2ck5J8lpsF/Olq3CN 
+j0PbyYwAF2ilC3VvWB6LUuzjviNZIykTCpE3GpXrie5NnfW18Tb3zQrIUttG3yMg +YOtUhgeY64e/xKC1mTJWe0N6YAaviQ3PnZbaPolsQEvEJpkxil11K2W6YTU+fH4g +w00+lWQs/8zsNX76apJqApqL0nf4WRsrFSpuN5vcrAgLf8VMTf6EJiebrJJTxAFZ +tdq3hLt5hMPpsOhvDI7toDqvC40Ciw== +-----END PRIVATE KEY----- diff --git a/test_certs/ng3CA-gathered/ng3.intermediate.ca-chain.cert.pem b/test_certs/ng3CA-gathered/ng3.intermediate.ca-chain.cert.pem new file mode 100644 index 00000000..7a631a64 --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3.intermediate.ca-chain.cert.pem @@ -0,0 +1,66 @@ +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwYjELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZEdWJsaW4xEzARBgNVBAoMCkh1 +YXdlaSBJUkMxDDAKBgNVBAsMA25nMzEMMAoGA1UEAwwDbmczMB4XDTI0MDQxMTEz +MzM1MloXDTM0MDQwOTEzMzM1MlowXjELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExl +aW5zdGVyMRMwEQYDVQQKDApIdWF3ZWkgSVJDMQwwCgYDVQQLDANuZzMxGTAXBgNV +BAMMEG5nMyBJbnRlcm1lZGlhdGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDYS//VKtBdtXCz0qDXPUYbttb3VVZzIesHiX40IdV1ke9lhPmBjfgSt3ZM +Btibxnx3eFJiO39oIAXGoLJsOgu+p4VF39Ux7VraA1U4yYIhiP7W24T5DTr/I4ey +kVN2PVZLQxI8hgZ1hVsC3dkabxC3PQaxDYzCrF7nxG47dk1zi6G6m7w20RFdIBH3 +JncHy29p53a0I9DWXLoBVygSBlmeUaiX1gQ14hNTO4fEKOemH0eVm4RUKunJ7Imu +dKf+sMjNUbhV7AYbmeJobktbLoz8qN955whzKypa8ENoIWFeH8G0zNziivcHyG8a +wELonKBbd8HWBxsWUrOP5bbSBnJ/KW+xH1R9LNUNZEyUKus4aXROmWUI23imxmNY +1qXh3fOrlIIjysKCcl6+Pzqf/YgCqhzOvxFHjVPTjVY4S/W7EuAEzegvHhsLgra8 +OAT4LsEwWwvjLs+hY+T9s1Xh9xTuGUXwqFoL5BKvcxU9hKvWAa6tYkbvC+ciUqbm +s8kDaJhtSTATtEy/5n0eOy0+ptcPoSGyMWy3QAsB97fbwDCaWnFw3DtLdMXPvomF +e5ND5IoeskCPqPRceUDQ99v94utqv8zPx+lcYeF5uZleVW3WnVKUy1K0PAXFucVm +w2WZCx5cop4LlefVQ6fKtemVHpkM/QqcEgWwFMnHuBF67e0oIwIDAQABo2YwZDAd +BgNVHQ4EFgQUtbv/R7AOjHM74kvRvv7Y3OEMwfEwHwYDVR0jBBgwFoAUWBCdbsKu +081XNkpOvxf2Zvx3AeQwEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMC +AYYwDQYJKoZIhvcNAQELBQADggIBAJaOXaaRDMNpxzGqBRW2dqYasGA2yd29DjeA +TFirROZvbZfCLll6Lv/usYdM/MAT9F0/SMkxfT9kM87E5EDgeV2Ja39TA41opOyp +4K+/0/UhI0JRkclf9tlNPxes8PMq+5g1581aulvB5szB9X77xM6z11+wd8TtSQS8 +7vkVIJyA/gYsPzsUgviTD5eBcR+0XF5D1W7qYo9+Ww0okPrGusfy3rlQBcn6apTJ +SUI/BS3XuP/s8znmWp8NxCiHbAJe74BSbs7/l3F1KwmoiVqqLBHHck0AQwVarQgA +bj9JmVSdJkE/nkW7g6zmv903AeqDKpeng84j8chro0ySO+lRFn8JR2hLyMGsSwHg +r5ws1F7WwIwtwOURvu7X68+BJASBEZ72j/soIwQSrCSAm89VVaxrpVYcNI4/7Odv +kI/5g0FPe30y+GXVAc2Vq9NIelRqhc39AP1OR9M5oD9PrMv6RSAtau7fo7HfnKkH +5Ja2JrNHNoE7EOxU4936D611KzaRxgqh5LPnZqw19AtKA7eUcsSdyp1gOy3Watqn +8jdIL6QPOO6S9LkYU29EtP8j70v5NzvNoJwToos5L1Uhgv5m1+AQ79Ib34a2mfTD +wttoM3E5DFS69wVsB7fZCScmnG42IHAYBRvBzdcMpbAu9vb1qlSGD+Lzmml/jodo +rx8+bBxx +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIUcwMpj72xkIjGJ5knUariL5Z9VZ8wDQYJKoZIhvcNAQEL +BQAwYjELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZE +dWJsaW4xEzARBgNVBAoMCkh1YXdlaSBJUkMxDDAKBgNVBAsMA25nMzEMMAoGA1UE +AwwDbmczMB4XDTI0MDQxMTEzMzM1MFoXDTQ0MDQwNjEzMzM1MFowYjELMAkGA1UE +BhMCSUUxETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZEdWJsaW4xEzARBgNV +BAoMCkh1YXdlaSBJUkMxDDAKBgNVBAsMA25nMzEMMAoGA1UEAwwDbmczMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA5er3t+MhgUZcDkq38y5uniocIJBD +e1ZbE7p1bT69B9cO7t6iFyl1yncHtxC7UZoxh3XrExVOa8kURKXD3tyYeGephbje +HZVCG7YlfzbWj4+lUOrFi0X/a8LcFQIWicRwn3s4HVi3TNiJ0Cvl0yekJ1nwp6GR +/QxCOk8NPIclqQTM7oALqK+aOaYoH7qjmlNPbrj8ACrKwWf+bd4DKBH23fo4fNXR +cXWy2q29kxSNbPrU9ic+bw3H25OMfxpuV9//ZKIXKjTRqgwuMANgikCANskCQStK +GtfdudHIzR6U7JqSXdB/PyuZuzAY2BthsZHBkoDrjzRG75tcxJINcZIFYCVRBTf1 +1h5tzB2fF6YSfYVRYIGZ45E3hP1mbBAFO627PY+ZVpYYeKHpIRTT5HnaaCn2wLOi +8yODsuOYxxFv8tMoowYKaFSnxvw1QJBf8bwKaswajUqXkz4NlbXcmXmdaAPD/Y3f 
+o96OSQqKeMNE2kck19dOP0g5odCGTLWDyZr25QCIvPsGnJpNiBUgqVWywuRmJMH7 +YH+gri/qYelUsDMOs3qlmMr7v0Yll7svxpFka1T3hsj/LcaYlCttlEFvBgMf0zlO +hqavBunU+qGcn4Q6qO+gjcwOrqaxGtdHUW7UZCeD4/96Nj58ClNkBFzTy3hmZFPN +e6JjBdN8fXgMOxMCAwEAAaNjMGEwHQYDVR0OBBYEFFgQnW7CrtPNVzZKTr8X9mb8 +dwHkMB8GA1UdIwQYMBaAFFgQnW7CrtPNVzZKTr8X9mb8dwHkMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBxZKgIcPVJ +GuetBMqdRehz8E+2qHLwY7OpRJk+vWOgL737QcMhH4QOZ8OQz3zi+MUMxd1IGMZA +Pfqfe0i1L8I3MR6S4W46ko7EPINTUfsD+JeD6txnLvRb7xAUR+zc5uvQqgfsbcLp +Rz+U8zG/8KAATfDKjqeGIw2S7NYHUGvb8d6tJ5ngiKSJ3VcXgt0Vi+9yXYG9pCa/ +YAhu18YW4nn6eio798HSXCPW4ARRcaAmrW1PkyK5eFzlXi7VaJJwsX3f0m1t4R5Y +cPVYkMgp0tU4cUEoPXZNrzvE65Uf7cZTld9Ub4C1cymgDBDZ4sJ8IzF4DpedgO6D +KaO6TYP3+vacYUtYSqmVUa9J/+EUBTLzZKk2+jSB0pAPdjduBzyaugY+9yD1+FYy +pnp2nYF2fGB2cozwyQaTEMdJeLmckgFw/K5mSPoC8a4DaSqvAqE31PmwGclAZ0LW +gqebW7FWrRBRtReUaSq33Xo6GHpzTbB3XLXUj+6EQRVhDf0YN77wsTq5tGlVk4QT +MEh48NiSTb79y7XkMy7x9m8vU3D49zVxZcZvtIrl6QuX75FRTBs+uD9FuNTFbP+J +lGXVu7Pg3PCp+eyzco0xpWYd4ASrDmI4uJxfN2L/O5epCwO7/va9QnUocpma0YwA +384kKlzrCZENGOiconYR1BLINoVCYUNPFA== +-----END CERTIFICATE----- diff --git a/test_certs/ng3CA-gathered/ng3.intermediate.cert.pem b/test_certs/ng3CA-gathered/ng3.intermediate.cert.pem new file mode 100644 index 00000000..4772aa0d --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3.intermediate.cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwYjELMAkGA1UEBhMCSUUx +ETAPBgNVBAgMCExlaW5zdGVyMQ8wDQYDVQQHDAZEdWJsaW4xEzARBgNVBAoMCkh1 +YXdlaSBJUkMxDDAKBgNVBAsMA25nMzEMMAoGA1UEAwwDbmczMB4XDTI0MDQxMTEz +MzM1MloXDTM0MDQwOTEzMzM1MlowXjELMAkGA1UEBhMCSUUxETAPBgNVBAgMCExl +aW5zdGVyMRMwEQYDVQQKDApIdWF3ZWkgSVJDMQwwCgYDVQQLDANuZzMxGTAXBgNV +BAMMEG5nMyBJbnRlcm1lZGlhdGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDYS//VKtBdtXCz0qDXPUYbttb3VVZzIesHiX40IdV1ke9lhPmBjfgSt3ZM +Btibxnx3eFJiO39oIAXGoLJsOgu+p4VF39Ux7VraA1U4yYIhiP7W24T5DTr/I4ey +kVN2PVZLQxI8hgZ1hVsC3dkabxC3PQaxDYzCrF7nxG47dk1zi6G6m7w20RFdIBH3 +JncHy29p53a0I9DWXLoBVygSBlmeUaiX1gQ14hNTO4fEKOemH0eVm4RUKunJ7Imu +dKf+sMjNUbhV7AYbmeJobktbLoz8qN955whzKypa8ENoIWFeH8G0zNziivcHyG8a +wELonKBbd8HWBxsWUrOP5bbSBnJ/KW+xH1R9LNUNZEyUKus4aXROmWUI23imxmNY +1qXh3fOrlIIjysKCcl6+Pzqf/YgCqhzOvxFHjVPTjVY4S/W7EuAEzegvHhsLgra8 +OAT4LsEwWwvjLs+hY+T9s1Xh9xTuGUXwqFoL5BKvcxU9hKvWAa6tYkbvC+ciUqbm +s8kDaJhtSTATtEy/5n0eOy0+ptcPoSGyMWy3QAsB97fbwDCaWnFw3DtLdMXPvomF +e5ND5IoeskCPqPRceUDQ99v94utqv8zPx+lcYeF5uZleVW3WnVKUy1K0PAXFucVm +w2WZCx5cop4LlefVQ6fKtemVHpkM/QqcEgWwFMnHuBF67e0oIwIDAQABo2YwZDAd +BgNVHQ4EFgQUtbv/R7AOjHM74kvRvv7Y3OEMwfEwHwYDVR0jBBgwFoAUWBCdbsKu +081XNkpOvxf2Zvx3AeQwEgYDVR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMC +AYYwDQYJKoZIhvcNAQELBQADggIBAJaOXaaRDMNpxzGqBRW2dqYasGA2yd29DjeA +TFirROZvbZfCLll6Lv/usYdM/MAT9F0/SMkxfT9kM87E5EDgeV2Ja39TA41opOyp +4K+/0/UhI0JRkclf9tlNPxes8PMq+5g1581aulvB5szB9X77xM6z11+wd8TtSQS8 +7vkVIJyA/gYsPzsUgviTD5eBcR+0XF5D1W7qYo9+Ww0okPrGusfy3rlQBcn6apTJ +SUI/BS3XuP/s8znmWp8NxCiHbAJe74BSbs7/l3F1KwmoiVqqLBHHck0AQwVarQgA +bj9JmVSdJkE/nkW7g6zmv903AeqDKpeng84j8chro0ySO+lRFn8JR2hLyMGsSwHg +r5ws1F7WwIwtwOURvu7X68+BJASBEZ72j/soIwQSrCSAm89VVaxrpVYcNI4/7Odv +kI/5g0FPe30y+GXVAc2Vq9NIelRqhc39AP1OR9M5oD9PrMv6RSAtau7fo7HfnKkH +5Ja2JrNHNoE7EOxU4936D611KzaRxgqh5LPnZqw19AtKA7eUcsSdyp1gOy3Watqn +8jdIL6QPOO6S9LkYU29EtP8j70v5NzvNoJwToos5L1Uhgv5m1+AQ79Ib34a2mfTD +wttoM3E5DFS69wVsB7fZCScmnG42IHAYBRvBzdcMpbAu9vb1qlSGD+Lzmml/jodo +rx8+bBxx +-----END CERTIFICATE----- diff --git a/test_certs/ng3CA-gathered/ng3.intermediate.key.pem b/test_certs/ng3CA-gathered/ng3.intermediate.key.pem new file mode 
100644 index 00000000..6cd62747 --- /dev/null +++ b/test_certs/ng3CA-gathered/ng3.intermediate.key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDYS//VKtBdtXCz +0qDXPUYbttb3VVZzIesHiX40IdV1ke9lhPmBjfgSt3ZMBtibxnx3eFJiO39oIAXG +oLJsOgu+p4VF39Ux7VraA1U4yYIhiP7W24T5DTr/I4eykVN2PVZLQxI8hgZ1hVsC +3dkabxC3PQaxDYzCrF7nxG47dk1zi6G6m7w20RFdIBH3JncHy29p53a0I9DWXLoB +VygSBlmeUaiX1gQ14hNTO4fEKOemH0eVm4RUKunJ7ImudKf+sMjNUbhV7AYbmeJo +bktbLoz8qN955whzKypa8ENoIWFeH8G0zNziivcHyG8awELonKBbd8HWBxsWUrOP +5bbSBnJ/KW+xH1R9LNUNZEyUKus4aXROmWUI23imxmNY1qXh3fOrlIIjysKCcl6+ +Pzqf/YgCqhzOvxFHjVPTjVY4S/W7EuAEzegvHhsLgra8OAT4LsEwWwvjLs+hY+T9 +s1Xh9xTuGUXwqFoL5BKvcxU9hKvWAa6tYkbvC+ciUqbms8kDaJhtSTATtEy/5n0e +Oy0+ptcPoSGyMWy3QAsB97fbwDCaWnFw3DtLdMXPvomFe5ND5IoeskCPqPRceUDQ +99v94utqv8zPx+lcYeF5uZleVW3WnVKUy1K0PAXFucVmw2WZCx5cop4LlefVQ6fK +temVHpkM/QqcEgWwFMnHuBF67e0oIwIDAQABAoICAAEBmR6Gcvb3OES7DjNvj56x +jE3bMl/wdCZH4BC8P367iv2NzYH+1ehFrIPf/3CWmW5QReemR7Gqp3Balmrwo0mT +VwALfe923WT/YWQhxcr/ypfXpkZo+2UzRcatXAvUuYJ75h8P3QJQJdjv1z6DD2h6 +DFkyCBZNgQRdG0WYex8E3ELuOXrqcyQmNAdzdqIzcJ2WsKQeD4FyoYjlD8rvWgPM +2BCz3OjL+SUTGFsxy6abPkqsKOnoKm1VRFvuwBC+wx8YgNvryyi014xmFUfTIuwI +PrPXyRmFGCmwHAm3zOAlJGr9mjg88JZDy47ZWZRFfFRlxD7HaZZIbzYo5BVG/Czi +Vd9/AK0zCb4lrU1v1jnd43M/lIbWATJIafghFehCC1pPQS8iIXlub9579Nnotcp1 +T9UJHcHdN0k6U81HLDgWj11ktZOosZKk1ELgPGqFDIA1c3CJowiY4hg+NZhrV4zP +6jz1u9a+PtaT3RPT7OCCbl92b6ywqjeUMCPOzc0BsX1Ll5aq6EMA1LD6hxb7KBp4 +QvE1v2SskShahquBv1UXIj/ljYOquKaIbwNJBqO/PywLGV03/bTdyTkzYXUm1PyE ++v0GLIMrpiQBDebxX/aFXVINJ+NxZ40ZLCaH4FbSkHNjl8/FZXHjj0IVgbB1I14F ++Pbg0tFNbNL6MxexNYhJAoIBAQDe7QGeO+CLCSRxKgMhAXgudCKrLvWsJQx/moEx +tPKfW+hzk2W509DM/OigJjheZ1ucZNHzBXx4cz2e7sLGJwiVOlGkNtPJGzM4jytX +xnz6rLw/e4VokACaDtgsVcmMzVd+qEf3KEgqKC5YzcgJzRThkmdUu8BLwJGwjbsw +Cquv2+q/lMeiVg9iWe530AY3M4jGTdwP5SFFxk8VQJXNt3Vrt93jE7gCCOFw8ERW +s2Z4LLIhqQtc7a591rVeqC0EMX7BrAxmI7LSSF0/Mhem1dMG5OM22uX/xqy6bOFr +HRgo70VXcbHTaUTq2gTBIeLyeAq18aKn4C1wVvIfYpYwYrwrAoIBAQD4YzfSiHtk +zr9VFA+5JG/koRgsUWFGBjUuc/zF9/IWqCZZFh/PqQ8H0QGGVvjs26jSniPzuXE7 +3OUhGNlsHn08fdr4gNJSiwn+LYBli279fC2r3ClaiW324Z6oCBYAwKrI+KnW/7dU +2+CuVaPZKMsoxKxMibxeBBjRdqjYs4S+LxQG/SSD3Sswy0cP/+4b+0XeLcH4A/YO +gdfyMkkatWIwIrxSqobU4x/2Mglv2z9yZEbkKTphRRUyvib0ZvsHiWaYvWVMkvQI +aODqZmlnSeRFwGcPZH3pICLpoX66Gpo1XWEC2T58ayXULWaDqFjrzHKFDYHHhbwT +rYeVWN2UjC/pAoIBAD2lVrw2HKr2FUDB+GmJKX3XGbsy2jcHa8RhqnZfbnSJCufj +C89udqApE/8tcosQk/HUo4ZLrxbz70djCTOYmbQCk48QplJHlkt+s+Kq/eaoG8qG +UucOY0w27Kqd8oVgR2C8EmKTnhkiUWa3SSkEsFCMlsyEuvF3yBIFr4zidoXED894 +yhfnKovtCJfTS9S6CImmAEPY/fF4nGfpG0PvBqaKCu7/hfPyFr2Pg6ZiJ+T1nzDV +uVGyXH1qAW+ym/g746yU8jzQhGPIJRkTthypppiSdQ63S8EGZL+M2m0vd33iZM+A +nkczJh63uXba8xJQBc44it1XjdkyAfSR46xvsU8CggEAcJY4lVt74wO7wkqnndog +RdenZ18z4lOqA3Zbmzh6KT7qsvLOVIhi4b4hHM2LY4FlkqJP1Ye3BdVononu840W +YzcA7CcoLnYLf4UMb/q1wnxpDz9qFzm3+HHoQm332zt5rkLYaTXb2cEYpKz46dQN +0lrjZoYANfVRcpCAlEk64CHiyBAyXvr/OszJVz1AprpMvxqqk0roW0cLpSKZ9FtH +OF5mUmotV43jzOp9UJBmv+koIP93EOQJaci7S4jqPE5RjOY8Zmst5inz2Qu2tzTN +HbjKfjWkxOjmKlQjZyPrxkifUnU6V5cRvI0frInAVJGFTz4CF3hM+fZsQIvZVB3/ +EQKCAQB/a7eC+wyMmbxsHyyQILZST7vq+FFrIF9sTBtdvlCQLiPMwE2H+LiRfKHt +dzFHI0kp11qHNXcBfAj6paCn7ZltBp0X4DCb3g6rQYiZLDdXPPv6/Al5spVJsqjE +8NxygVW3tHA/LBH0tE6am6hlO+HAlgTX00Bykb6bICb4gy3oUZyQPSYbHTWG82oi +jXv1zAkEizjM/dh11IXeh4GeklbLrZLfrS7wfTHxsbZ3SrAtwBsK9xQBRQGBLhYn +axfAXWwHbQgxEO+/C24sIBekrk/53+Q2CMwuafVksv/1Pb0Q1qagwfKj/XsQ/0jY +EobEVHweOzx7A2F+kGuuDiRTeZJx +-----END PRIVATE KEY----- diff --git a/tools/add_submodules.sh b/tools/add_submodules.sh new file mode 100755 index 00000000..88769872 --- 
/dev/null +++ b/tools/add_submodules.sh @@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Register every submodule declared in .gitmodules as a shallow (depth-1) clone.
+
+set -e
+
+git config -f .gitmodules --get-regexp '^submodule\..*\.path$' |
+  while read -r path_key local_path
+  do
+    # Derive the URL key from the path key: submodule.<name>.path -> submodule.<name>.url
+    url_key=${path_key%.path}.url
+    url=$(git config -f .gitmodules --get "$url_key")
+    git submodule add --depth 1 "$url" "$local_path"
+  done
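# Usage sketch (an assumption, not part of the patch): run from the repository
# root of a checkout whose .gitmodules entries are not yet registered in the
# index, e.g. after copying .gitmodules into a fresh repository:
#
#   ./tools/add_submodules.sh   # shallow-clones each listed submodule into place
#   git submodule status        # verify every entry is now initialised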