diff --git a/Cargo.lock b/Cargo.lock index a501d7761fb..9042f09ff3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -194,9 +194,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" dependencies = [ "backtrace", ] @@ -207,7 +207,7 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -217,7 +217,7 @@ source = "git+https://github.com/oxidecomputer/omicron?branch=main#bd6c62807fbb2 dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -320,7 +320,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -342,7 +342,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -353,7 +353,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -391,7 +391,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -533,7 +533,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.29", + "syn 2.0.28", "which", ] @@ -1093,7 +1093,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -1111,6 +1111,17 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +[[package]] +name = "clipboard-win" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7191c27c2357d9b7ef96baac1773290d4ca63b24205b82a3fd8a0637afcf0362" +dependencies = [ + "error-code", + "str-buf", + "winapi", +] + [[package]] name = "codespan-reporting" version = "0.11.1" @@ -1725,7 +1736,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -1747,7 +1758,7 @@ checksum = "7bfb82b62b1b8a2a9808fb4caf844ede819a76cfc23b2827d7f94eefb49551eb" dependencies = [ "darling_core 0.20.0", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -1777,7 +1788,7 @@ dependencies = [ "rustfmt-wrapper", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -1830,7 +1841,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -1841,7 +1852,7 @@ checksum = "5bc1955a640c4464859ae700fbe48e666da6fdce99ce5fe1acd08dd295889d10" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -1928,7 +1939,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -1937,7 +1948,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -2156,7 +2167,7 @@ dependencies = [ [[package]] name = "dropshot" version = "0.9.1-dev" -source = 
"git+https://github.com/oxidecomputer/dropshot?branch=main#dc18a74060bdee7b2673704b6bd1607ad2bb890a" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#8ef2bd2ed815ec3ce505ff1f006cdf2ac8ad30c7" dependencies = [ "async-stream", "async-trait", @@ -2201,13 +2212,13 @@ dependencies = [ [[package]] name = "dropshot_endpoint" version = "0.9.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#dc18a74060bdee7b2673704b6bd1607ad2bb890a" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#8ef2bd2ed815ec3ce505ff1f006cdf2ac8ad30c7" dependencies = [ "proc-macro2", "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -2358,7 +2369,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -2432,6 +2443,16 @@ dependencies = [ "libc", ] +[[package]] +name = "error-code" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f18991e7bf11e7ffee451b5318b5c1a73c52d0d0ada6e5a3017c8c1ced6a21" +dependencies = [ + "libc", + "str-buf", +] + [[package]] name = "expectorate" version = "1.0.7" @@ -2699,7 +2720,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -3073,11 +3094,18 @@ name = "helios-fusion" version = "0.1.0" dependencies = [ "async-trait", + "camino", "itertools 0.11.0", + "regress", + "schemars", + "serde", + "serde_json", "shlex", "slog", "thiserror", "tokio", + "toml 0.7.6", + "uuid", ] [[package]] @@ -3111,19 +3139,28 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "bitflags 1.3.2", "camino", "cfg-if 1.0.0", + "clap 4.3.21", "futures", "helios-fusion", + "ipnetwork", "itertools 0.11.0", "libc", "omicron-common 0.1.0", "omicron-test-utils", + "once_cell", + "petgraph", + "rustyline", "schemars", "serde", "shlex", "slog", + "slog-async", + "slog-term", "smf", + "strum 0.25.0", "thiserror", "tokio", "uuid", @@ -4566,7 +4603,7 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -4699,7 +4736,7 @@ checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -4782,7 +4819,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -5661,7 +5698,7 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -5671,7 +5708,7 @@ source = "git+https://github.com/oxidecomputer/omicron?branch=main#bd6c62807fbb2 dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -5981,6 +6018,17 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "phf" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259" +dependencies = [ + "phf_macros", + "phf_shared 0.10.0", + "proc-macro-hack", +] + [[package]] name = "phf" version = "0.11.1" @@ -5990,6 +6038,30 @@ dependencies = [ "phf_shared 0.11.1", ] +[[package]] +name = "phf_generator" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" +dependencies = [ + "phf_shared 
0.10.0", + "rand 0.8.5", +] + +[[package]] +name = "phf_macros" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fdf3184dd560f160dd73922bea2d5cd6e8f064bf4b13110abd81b03697b4e0" +dependencies = [ + "phf_generator", + "phf_shared 0.10.0", + "proc-macro-hack", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "phf_shared" version = "0.10.0" @@ -6275,7 +6347,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -6312,6 +6384,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + [[package]] name = "proc-macro2" version = "1.0.66" @@ -6324,7 +6402,7 @@ dependencies = [ [[package]] name = "progenitor" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#76716eeaaebfec796c9e7a5bf15d3a69212363bc" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0b4f441bd27a81a27b1f679d00f3d7e65601a80d" dependencies = [ "progenitor-client", "progenitor-impl", @@ -6335,7 +6413,7 @@ dependencies = [ [[package]] name = "progenitor-client" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#76716eeaaebfec796c9e7a5bf15d3a69212363bc" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0b4f441bd27a81a27b1f679d00f3d7e65601a80d" dependencies = [ "bytes", "futures-core", @@ -6349,7 +6427,7 @@ dependencies = [ [[package]] name = "progenitor-impl" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#76716eeaaebfec796c9e7a5bf15d3a69212363bc" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0b4f441bd27a81a27b1f679d00f3d7e65601a80d" dependencies = [ "getopts", "heck 0.4.1", @@ -6362,7 +6440,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "syn 2.0.29", + "syn 2.0.28", "thiserror", "typify", "unicode-ident", @@ -6371,7 +6449,7 @@ dependencies = [ [[package]] name = "progenitor-macro" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#76716eeaaebfec796c9e7a5bf15d3a69212363bc" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#0b4f441bd27a81a27b1f679d00f3d7e65601a80d" dependencies = [ "openapiv3", "proc-macro2", @@ -6382,7 +6460,7 @@ dependencies = [ "serde_json", "serde_tokenstream 0.2.0", "serde_yaml", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -6789,22 +6867,22 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.20" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acde58d073e9c79da00f2b5b84eed919c8326832648a5b109b3fce1bb1175280" +checksum = "61ef7e18e8841942ddb1cf845054f8008410030a3997875d9e49b7a363063df1" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.20" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" +checksum = "2dfaf0c85b766276c797f3791f5bc6d5bd116b41d53049af2789666b0c0bc9fa" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -7291,6 +7369,29 @@ dependencies = [ "wait-timeout", ] 
+[[package]] +name = "rustyline" +version = "12.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "994eca4bca05c87e86e15d90fc7a91d1be64b4482b38cb2d27474568fe7c9db9" +dependencies = [ + "bitflags 2.3.1", + "cfg-if 1.0.0", + "clipboard-win", + "fd-lock", + "home", + "libc", + "log", + "memchr", + "nix 0.26.2 (registry+https://github.com/rust-lang/crates.io-index)", + "radix_trie", + "scopeguard", + "unicode-segmentation", + "unicode-width", + "utf8parse", + "winapi", +] + [[package]] name = "ryu" version = "1.0.13" @@ -7530,7 +7631,7 @@ checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -7632,7 +7733,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -7672,7 +7773,7 @@ dependencies = [ "darling 0.20.0", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -8206,6 +8307,12 @@ dependencies = [ "uuid", ] +[[package]] +name = "str-buf" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" + [[package]] name = "string_cache" version = "0.8.7" @@ -8313,6 +8420,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" dependencies = [ + "phf 0.10.1", "strum_macros 0.25.2", ] @@ -8339,7 +8447,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -8371,9 +8479,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.29" +version = "2.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" dependencies = [ "proc-macro2", "quote", @@ -8518,22 +8626,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.47" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -8705,10 +8813,11 @@ dependencies = [ [[package]] name = "tokio" -version = "1.32.0" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ + "autocfg", "backtrace", "bytes", "libc", @@ -8717,7 +8826,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.4.9", "tokio-macros", "windows-sys 0.48.0", ] @@ -8730,7 +8839,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.28", ] [[package]] @@ -8758,7 +8867,7 
@@ dependencies = [ "log", "parking_lot 0.12.1", "percent-encoding", - "phf", + "phf 0.11.1", "pin-project-lite", "postgres-protocol", "postgres-types", @@ -9210,7 +9319,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "typify" version = "0.0.13" -source = "git+https://github.com/oxidecomputer/typify#981146f90869b61a25356c6f19ac2c5511571458" +source = "git+https://github.com/oxidecomputer/typify#a53a462c83b25d69827411e90a5bf80360034fc0" dependencies = [ "typify-impl", "typify-macro", @@ -9219,7 +9328,7 @@ dependencies = [ [[package]] name = "typify-impl" version = "0.0.13" -source = "git+https://github.com/oxidecomputer/typify#981146f90869b61a25356c6f19ac2c5511571458" +source = "git+https://github.com/oxidecomputer/typify#a53a462c83b25d69827411e90a5bf80360034fc0" dependencies = [ "heck 0.4.1", "log", @@ -9228,7 +9337,7 @@ dependencies = [ "regress", "schemars", "serde_json", - "syn 2.0.29", + "syn 2.0.28", "thiserror", "unicode-ident", ] @@ -9236,7 +9345,7 @@ dependencies = [ [[package]] name = "typify-macro" version = "0.0.13" -source = "git+https://github.com/oxidecomputer/typify#981146f90869b61a25356c6f19ac2c5511571458" +source = "git+https://github.com/oxidecomputer/typify#a53a462c83b25d69827411e90a5bf80360034fc0" dependencies = [ "proc-macro2", "quote", @@ -9244,7 +9353,7 @@ dependencies = [ "serde", "serde_json", "serde_tokenstream 0.2.0", - "syn 2.0.29", + "syn 2.0.28", "typify-impl", ] diff --git a/Cargo.toml b/Cargo.toml index 9a7eb8d0928..9a46059d3ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -144,6 +144,7 @@ base64 = "0.21.2" bb8 = "0.8.1" bcs = "0.1.5" bincode = "1.3.3" +bitflags = "1.3.2" bootstore = { path = "bootstore" } bootstrap-agent-client = { path = "bootstrap-agent-client" } buf-list = { version = "1.0.3", features = ["tokio1"] } @@ -296,6 +297,7 @@ ring = "0.16" rpassword = "7.2.0" rustfmt-wrapper = "0.2" rustls = "0.21.6" +rustyline = "12.0.0" samael = { git = "https://github.com/njaremko/samael", features = ["xmlsec"], branch = "master" } schemars = "0.8.12" secrecy = "0.8.0" @@ -333,7 +335,7 @@ static_assertions = "1.1.0" # harder than expected to make breaking changes (even if you specify a specific # SHA). Cut a new Steno release instead. See omicron#2117. steno = "0.4.0" -strum = { version = "0.25", features = [ "derive" ] } +strum = { version = "0.25", features = [ "derive", "phf" ] } subprocess = "0.2.9" libsw = { version = "3.3.0", features = ["tokio"] } syn = { version = "2.0" } diff --git a/common/src/vlan.rs b/common/src/vlan.rs index 45776e09ac9..c9ed8b11e4e 100644 --- a/common/src/vlan.rs +++ b/common/src/vlan.rs @@ -13,7 +13,7 @@ use std::str::FromStr; pub const VLAN_MAX: u16 = 4094; /// Wrapper around a VLAN ID, ensuring it is valid. 
-#[derive(Debug, Deserialize, Clone, Copy)] +#[derive(Debug, Deserialize, Clone, Copy, Eq, PartialEq)] pub struct VlanID(u16); impl VlanID { diff --git a/helios/fusion/Cargo.toml b/helios/fusion/Cargo.toml index bac3d9c7a24..838311de79c 100644 --- a/helios/fusion/Cargo.toml +++ b/helios/fusion/Cargo.toml @@ -7,8 +7,17 @@ license = "MPL-2.0" [dependencies] async-trait.workspace = true +camino.workspace = true itertools.workspace = true +schemars.workspace = true +serde.workspace = true shlex.workspace = true slog.workspace = true thiserror.workspace = true tokio.workspace = true +uuid.workspace = true + +[dev-dependencies] +regress.workspace = true +serde_json.workspace = true +toml.workspace = true diff --git a/illumos-utils/src/addrobj.rs b/helios/fusion/src/addrobj.rs similarity index 86% rename from illumos-utils/src/addrobj.rs rename to helios/fusion/src/addrobj.rs index d63aa42bdf5..655d8c93ad3 100644 --- a/illumos-utils/src/addrobj.rs +++ b/helios/fusion/src/addrobj.rs @@ -4,6 +4,8 @@ //! API for operating on addrobj objects. +use std::str::FromStr; + /// The name provided to all link-local IPv6 addresses. pub const IPV6_LINK_LOCAL_NAME: &str = "ll"; @@ -26,6 +28,7 @@ pub struct AddrObject { enum BadName { Interface(String), Object(String), + Other(String), } impl std::fmt::Display for BadName { @@ -36,6 +39,7 @@ impl std::fmt::Display for BadName { match self { BadName::Interface(s) => write!(f, "Bad interface name: {}", s), BadName::Object(s) => write!(f, "Bad object name: {}", s), + BadName::Other(s) => write!(f, "Bad name: {}", s), } } } @@ -84,6 +88,17 @@ impl AddrObject { } } +impl FromStr for AddrObject { + type Err = ParseError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + let (interface, name) = s.split_once('/').ok_or_else(|| { + ParseError { name: BadName::Other(s.to_string()) } + })?; + Self::new(interface, name) + } +} + impl std::fmt::Display for AddrObject { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}/{}", self.interface, self.name) diff --git a/helios/fusion/src/host.rs b/helios/fusion/src/host.rs new file mode 100644 index 00000000000..27433bc69a1 --- /dev/null +++ b/helios/fusion/src/host.rs @@ -0,0 +1,26 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Represents the entire emulated host system + +use crate::interfaces::libc::Libc; +use crate::interfaces::swapctl::Swapctl; +use crate::Executor; +use std::sync::Arc; + +/// The common wrapper around the host system, which makes it trivially +/// shareable. +pub type HostSystem = Arc<dyn Host>; + +/// Describes the interface used by Omicron when interacting with a host OS. +pub trait Host: Send + Sync { + /// Access the executor, for creating new processes + fn executor(&self) -> &dyn Executor; + + /// Access libswapctl + fn swapctl(&self) -> &dyn Swapctl; + + /// Access libc + fn libc(&self) -> &dyn Libc; +} diff --git a/helios/fusion/src/interfaces/libc.rs b/helios/fusion/src/interfaces/libc.rs new file mode 100644 index 00000000000..07c6f13f750 --- /dev/null +++ b/helios/fusion/src/interfaces/libc.rs @@ -0,0 +1,10 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//!
Interface to the libc API + +pub trait Libc { + /// sysconf(3c) + fn sysconf(&self, arg: i32) -> std::io::Result<i64>; +} diff --git a/helios/fusion/src/interfaces/mod.rs b/helios/fusion/src/interfaces/mod.rs new file mode 100644 index 00000000000..9c058cbcb03 --- /dev/null +++ b/helios/fusion/src/interfaces/mod.rs @@ -0,0 +1,6 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +pub mod libc; +pub mod swapctl; diff --git a/helios/fusion/src/interfaces/swapctl.rs b/helios/fusion/src/interfaces/swapctl.rs new file mode 100644 index 00000000000..835ec5831e5 --- /dev/null +++ b/helios/fusion/src/interfaces/swapctl.rs @@ -0,0 +1,46 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Interface to the swapctl API + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Error listing swap devices: {0}")] + ListDevices(String), + + #[error("Error adding swap device: {msg} (path=\"{path}\", start={start}, length={length})")] + AddDevice { msg: String, path: String, start: u64, length: u64 }, +} + +/// A representation of a swap device, as returned from swapctl(2) SC_LIST +#[derive(Debug, Clone)] +pub struct SwapDevice { + /// path to the resource + pub path: String, + + /// starting block on device used for swap + pub start: u64, + + /// length of swap area + pub length: u64, + + /// total number of pages used for swapping + pub total_pages: u64, + + /// free npages for swapping + pub free_pages: u64, + + pub flags: i64, +} + +pub trait Swapctl { + /// List swap devices on the system. + fn list_swap_devices(&self) -> Result<Vec<SwapDevice>, Error>; + fn add_swap_device( + &self, + path: String, + start: u64, + length: u64, + ) -> Result<(), Error>; +} diff --git a/helios/fusion/src/lib.rs b/helios/fusion/src/lib.rs index 3fc9d5a1727..9ce7b13c27a 100644 --- a/helios/fusion/src/lib.rs +++ b/helios/fusion/src/lib.rs @@ -4,14 +4,54 @@ //! Interfaces used to interact with the underlying host system.
+pub mod addrobj; mod error; mod executor; +mod host; mod input; +pub mod interfaces; mod output; +pub mod zpool; pub use error::*; pub use executor::*; +pub use host::*; pub use input::*; pub use output::*; +pub const COREADM: &str = "/usr/bin/coreadm"; +pub const DLADM: &str = "/usr/sbin/dladm"; +pub const DUMPADM: &str = "/usr/sbin/dumpadm"; +pub const FSTYP: &str = "/usr/sbin/fstyp"; +pub const IPADM: &str = "/usr/sbin/ipadm"; pub const PFEXEC: &str = "/usr/bin/pfexec"; +pub const ROUTE: &str = "/usr/sbin/route"; +pub const SAVECORE: &str = "/usr/bin/savecore"; +pub const SVCADM: &str = "/usr/sbin/svcadm"; +pub const SVCCFG: &str = "/usr/sbin/svccfg"; +pub const ZFS: &str = "/usr/sbin/zfs"; +pub const ZLOGIN: &str = "/usr/sbin/zlogin"; +pub const ZONEADM: &str = "/usr/sbin/zoneadm"; +pub const ZONECFG: &str = "/usr/sbin/zonecfg"; +pub const ZPOOL: &str = "/usr/sbin/zpool"; + +pub fn which_binary(short: &str) -> &str { + match short { + "coreadm" => COREADM, + "dladm" => DLADM, + "dumpadm" => DUMPADM, + "fstyp" => FSTYP, + "ipadm" => IPADM, + "pfexec" => PFEXEC, + "route" => ROUTE, + "savecore" => SAVECORE, + "svcadm" => SVCADM, + "svccfg" => SVCCFG, + "zfs" => ZFS, + "zlogin" => ZLOGIN, + "zoneadm" => ZONEADM, + "zonecfg" => ZONECFG, + "zpool" => ZPOOL, + short => short, + } +} diff --git a/helios/fusion/src/zpool.rs b/helios/fusion/src/zpool.rs new file mode 100644 index 00000000000..6faac51b1eb --- /dev/null +++ b/helios/fusion/src/zpool.rs @@ -0,0 +1,438 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Zpool structures + +use camino::Utf8PathBuf; +use schemars::JsonSchema; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use std::fmt; +use std::str::FromStr; +use uuid::Uuid; + +const ZPOOL_EXTERNAL_PREFIX: &str = "oxp_"; +const ZPOOL_INTERNAL_PREFIX: &str = "oxi_"; + +#[derive(thiserror::Error, Debug, PartialEq, Eq)] +#[error("Failed to parse output: {0}")] +pub struct ParseError(String); + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ZpoolHealth { + /// The device is online and functioning. + Online, + /// One or more components are degraded or faulted, but sufficient + /// replicas exist to continue functioning. + Degraded, + /// One or more components are degraded or faulted, and insufficient + /// replicas exist to continue functioning. + Faulted, + /// The device was explicitly taken offline by "zpool offline". + Offline, + /// The device was physically removed. + Removed, + /// The device could not be opened. + Unavailable, +} + +impl FromStr for ZpoolHealth { + type Err = ParseError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "ONLINE" => Ok(ZpoolHealth::Online), + "DEGRADED" => Ok(ZpoolHealth::Degraded), + "FAULTED" => Ok(ZpoolHealth::Faulted), + "OFFLINE" => Ok(ZpoolHealth::Offline), + "REMOVED" => Ok(ZpoolHealth::Removed), + "UNAVAIL" => Ok(ZpoolHealth::Unavailable), + _ => Err(ParseError(format!("Unrecognized zpool 'health': {}", s))), + } + } +} + +impl ToString for ZpoolHealth { + fn to_string(&self) -> String { + use ZpoolHealth::*; + match self { + Online => "ONLINE", + Degraded => "DEGRADED", + Faulted => "FAULTED", + Offline => "OFFLINE", + Removed => "REMOVED", + Unavailable => "UNAVAIL", + } + .to_string() + } +} + +/// Describes a Zpool.
+#[derive(Clone, Debug)] +pub struct ZpoolInfo { + name: String, + size: u64, + allocated: u64, + free: u64, + health: ZpoolHealth, +} + +impl ZpoolInfo { + pub fn name(&self) -> &str { + &self.name + } + + pub fn size(&self) -> u64 { + self.size + } + + #[allow(dead_code)] + pub fn allocated(&self) -> u64 { + self.allocated + } + + #[allow(dead_code)] + pub fn free(&self) -> u64 { + self.free + } + + #[allow(dead_code)] + pub fn health(&self) -> ZpoolHealth { + self.health + } +} + +impl FromStr for ZpoolInfo { + type Err = ParseError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + // Lambda helpers for error handling. + let expected_field = |name| { + ParseError(format!("Missing '{}' value in zpool list output", name)) + }; + let failed_to_parse = |name, err| { + ParseError(format!("Failed to parse field '{}': {}", name, err)) + }; + + let mut values = s.trim().split_whitespace(); + let name = + values.next().ok_or_else(|| expected_field("name"))?.to_string(); + let size = values + .next() + .ok_or_else(|| expected_field("size"))? + .parse::<u64>() + .map_err(|e| failed_to_parse("size", e))?; + let allocated = values + .next() + .ok_or_else(|| expected_field("allocated"))? + .parse::<u64>() + .map_err(|e| failed_to_parse("allocated", e))?; + let free = values + .next() + .ok_or_else(|| expected_field("free"))? + .parse::<u64>() + .map_err(|e| failed_to_parse("free", e))?; + let health = values + .next() + .ok_or_else(|| expected_field("health"))? + .parse::<ZpoolHealth>()?; + + Ok(ZpoolInfo { name, size, allocated, free, health }) + } +} + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub enum ZpoolKind { + // This zpool is used for external storage (u.2) + External, + // This zpool is used for internal storage (m.2) + Internal, +} + +/// A wrapper around a zpool name. +/// +/// This expects that the format will be: `ox{i,p}_<UUID>` - we parse the prefix +/// when reading the structure, and validate that the UUID can be utilized. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct ZpoolName { + id: Uuid, + kind: ZpoolKind, +} + +const ZPOOL_NAME_REGEX: &str = r"^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"; + +/// Custom JsonSchema implementation to encode the constraints on Name. +impl JsonSchema for ZpoolName { + fn schema_name() -> String { + "ZpoolName".to_string() + } + fn json_schema( + _: &mut schemars::gen::SchemaGenerator, + ) -> schemars::schema::Schema { + schemars::schema::SchemaObject { + metadata: Some(Box::new(schemars::schema::Metadata { + title: Some( + "The name of a Zpool".to_string(), + ), + description: Some( + "Zpool names are of the format ox{i,p}_<UUID>. They are either \ Internal or External, and should be unique" + .to_string(), + ), + ..Default::default() + })), + instance_type: Some(schemars::schema::InstanceType::String.into()), + string: Some(Box::new(schemars::schema::StringValidation { + pattern: Some(ZPOOL_NAME_REGEX.to_owned()), + ..Default::default() + })), + ..Default::default() + } + .into() + } +} + +impl ZpoolName { + pub fn new_internal(id: Uuid) -> Self { + Self { id, kind: ZpoolKind::Internal } + } + + pub fn new_external(id: Uuid) -> Self { + Self { id, kind: ZpoolKind::External } + } + + pub fn id(&self) -> Uuid { + self.id + } + + pub fn kind(&self) -> ZpoolKind { + self.kind + } + + /// Returns a path to a dataset's mountpoint within the zpool.
+ /// +/// For example: oxp_(UUID) -> /pool/ext/(UUID)/(dataset) + pub fn dataset_mountpoint(&self, dataset: &str) -> Utf8PathBuf { + let mut path = Utf8PathBuf::new(); + path.push("/pool"); + match self.kind { + ZpoolKind::External => path.push("ext"), + ZpoolKind::Internal => path.push("int"), + }; + path.push(self.id().to_string()); + path.push(dataset); + path + } +} + +impl<'de> Deserialize<'de> for ZpoolName { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + ZpoolName::from_str(&s).map_err(serde::de::Error::custom) + } +} + +impl Serialize for ZpoolName { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl FromStr for ZpoolName { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if let Some(s) = s.strip_prefix(ZPOOL_EXTERNAL_PREFIX) { + let id = Uuid::from_str(s).map_err(|e| e.to_string())?; + Ok(ZpoolName::new_external(id)) + } else if let Some(s) = s.strip_prefix(ZPOOL_INTERNAL_PREFIX) { + let id = Uuid::from_str(s).map_err(|e| e.to_string())?; + Ok(ZpoolName::new_internal(id)) + } else { + Err(format!( + "Bad zpool name {s}; must start with '{ZPOOL_EXTERNAL_PREFIX}' or '{ZPOOL_INTERNAL_PREFIX}'", + )) + } + } +} + +impl fmt::Display for ZpoolName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let prefix = match self.kind { + ZpoolKind::External => ZPOOL_EXTERNAL_PREFIX, + ZpoolKind::Internal => ZPOOL_INTERNAL_PREFIX, + }; + write!(f, "{prefix}{}", self.id) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_zpool_name_regex() { + let valid = [ + "oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + ]; + + let invalid = [ + "", + // Whitespace + " oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b ", + // Case sensitivity + "oxp_D462A7F7-b628-40fe-80ff-4e4189e2d62b", + // Bad prefix + "ox_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxa_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxi-d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxp-d462a7f7-b628-40fe-80ff-4e4189e2d62b", + // Missing Prefix + "d462a7f7-b628-40fe-80ff-4e4189e2d62b", + // Bad UUIDs (Not following UUIDv4 format) + "oxi_d462a7f7-b628-30fe-80ff-4e4189e2d62b", + "oxi_d462a7f7-b628-40fe-c0ff-4e4189e2d62b", + ]; + + let r = regress::Regex::new(ZPOOL_NAME_REGEX) + .expect("validation regex is valid"); + for input in valid { + let m = r + .find(input) + .unwrap_or_else(|| panic!("input {input} did not match regex")); + assert_eq!(m.start(), 0, "input {input} did not match start"); + assert_eq!(m.end(), input.len(), "input {input} did not match end"); + } + + for input in invalid { + assert!( + r.find(input).is_none(), + "invalid input {input} should not match validation regex" + ); + } + } + + #[test] + fn test_parse_zpool_name_json() { + #[derive(Serialize, Deserialize, JsonSchema)] + struct TestDataset { + pool_name: ZpoolName, + } + + // Confirm that we can convert from a JSON string to a ZpoolName + let json_string = + r#"{"pool_name":"oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b"}"#; + let dataset: TestDataset = serde_json::from_str(json_string) + .expect("Could not parse ZpoolName from Json Object"); + assert!(matches!(dataset.pool_name.kind, ZpoolKind::Internal)); + + // Confirm we can go the other way (ZpoolName to JSON string) too.
+ let j = serde_json::to_string(&dataset) + .expect("Cannot convert back to JSON string"); + assert_eq!(j, json_string); + } + + fn toml_string(s: &str) -> String { + format!("zpool_name = \"{}\"", s) + } + + fn parse_name(s: &str) -> Result<ZpoolName, toml::de::Error> { + toml_string(s) + .parse::<toml::Value>() + .expect("Cannot parse as TOML value") + .get("zpool_name") + .expect("Missing key") + .clone() + .try_into::<ZpoolName>() + } + + #[test] + fn test_parse_external_zpool_name() { + let uuid: Uuid = + "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); + let good_name = format!("{}{}", ZPOOL_EXTERNAL_PREFIX, uuid); + + let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); + assert_eq!(uuid, name.id()); + assert_eq!(ZpoolKind::External, name.kind()); + } + + #[test] + fn test_parse_internal_zpool_name() { + let uuid: Uuid = + "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); + let good_name = format!("{}{}", ZPOOL_INTERNAL_PREFIX, uuid); + + let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); + assert_eq!(uuid, name.id()); + assert_eq!(ZpoolKind::Internal, name.kind()); + } + + #[test] + fn test_parse_bad_zpool_names() { + let bad_names = vec![ + // Nonsense string + "this string is GARBAGE", + // Missing prefix + "d462a7f7-b628-40fe-80ff-4e4189e2d62b", + // Underscores + "oxp_d462a7f7_b628_40fe_80ff_4e4189e2d62b", + ]; + + for bad_name in &bad_names { + assert!( + parse_name(&bad_name).is_err(), + "Parsing {} should fail", + bad_name + ); + } + } + + #[test] + fn test_parse_zpool() { + let name = "rpool"; + let size = 10000; + let allocated = 6000; + let free = 4000; + let health = "ONLINE"; + + // We should be able to tolerate any whitespace between columns. + let input = format!( + "{} {} {} \t\t\t {} {}", + name, size, allocated, free, health + ); + let output: ZpoolInfo = input.parse().unwrap(); + assert_eq!(output.name(), name); + assert_eq!(output.size(), size); + assert_eq!(output.allocated(), allocated); + assert_eq!(output.free(), free); + assert_eq!(output.health(), ZpoolHealth::Online); + } + + #[test] + fn test_parse_zpool_missing_column() { + let name = "rpool"; + let size = 10000; + let allocated = 6000; + let free = 4000; + let _health = "ONLINE"; + + // Similar to the prior test case, just omit "health". + let input = format!("{} {} {} {}", name, size, allocated, free); + let result: Result<ZpoolInfo, ParseError> = input.parse(); + + let expected_err = ParseError( + "Missing 'health' value in zpool list output".to_owned(), + ); + assert_eq!(result.unwrap_err(), expected_err,); + } +} diff --git a/helios/protostar/src/executor.rs b/helios/protostar/src/executor.rs new file mode 100644 index 00000000000..5ad75e4f01c --- /dev/null +++ b/helios/protostar/src/executor.rs @@ -0,0 +1,154 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A "real" [Executor] implementation, which sends commands to the host. + +use helios_fusion::{ + log_input, log_output, AsCommandStr, BoxedChild, BoxedExecutor, Child, + ExecutionError, Executor, Input, Output, +}; + +use async_trait::async_trait; +use slog::{error, Logger}; +use std::io::{Read, Write}; +use std::process::{Command, Stdio}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; + +/// Implements [Executor] by running commands against the host system.
+pub struct HostExecutor { + log: slog::Logger, + counter: std::sync::atomic::AtomicU64, +} + +impl HostExecutor { + pub fn new(log: Logger) -> Arc<Self> { + Arc::new(Self { log, counter: AtomicU64::new(0) }) + } + + pub fn as_executor(self: Arc<Self>) -> BoxedExecutor { + self + } + + fn prepare(&self, command: &Command) -> u64 { + let id = self.counter.fetch_add(1, Ordering::SeqCst); + log_input(&self.log, id, command); + id + } + + fn finalize( + &self, + command: &Command, + id: u64, + output: Output, + ) -> Result<Output, ExecutionError> { + log_output(&self.log, id, &output); + if !output.status.success() { + return Err(ExecutionError::from_output(command, &output)); + } + Ok(output) + } +} + +#[async_trait] +impl Executor for HostExecutor { + async fn execute_async( + &self, + command: &mut tokio::process::Command, + ) -> Result<Output, ExecutionError> { + let id = self.prepare(command.as_std()); + let output = command.output().await.map_err(|err| { + error!(self.log, "Could not start program asynchronously!"; "id" => id); + ExecutionError::ExecutionStart { + command: Input::from(command.as_std()).to_string(), + err, + } + })?; + self.finalize(command.as_std(), id, output) + } + + fn execute(&self, command: &mut Command) -> Result<Output, ExecutionError> { + let id = self.prepare(command); + let output = command.output().map_err(|err| { + error!(self.log, "Could not start program!"; "id" => id); + ExecutionError::ExecutionStart { + command: Input::from(&*command).to_string(), + err, + } + })?; + self.finalize(command, id, output) + } + + fn spawn( + &self, + command: &mut Command, + ) -> Result<BoxedChild, ExecutionError> { + let command_str = (&*command).into_str(); + Ok(Box::new(SpawnedChild { + child: Some( + command + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .map_err(|err| ExecutionError::ExecutionStart { + command: command_str.clone(), + err, + })?, + ), + command_str, + })) + } +} + +/// A real, host-controlled child process +pub struct SpawnedChild { + command_str: String, + child: Option<std::process::Child>, +} + +impl Child for SpawnedChild { + fn take_stdin(&mut self) -> Option<Box<dyn Write + Send>> { + self.child + .as_mut()? + .stdin + .take() + .map(|s| Box::new(s) as Box<dyn Write + Send>) + } + + fn take_stdout(&mut self) -> Option<Box<dyn Read + Send>> { + self.child + .as_mut()? + .stdout + .take() + .map(|s| Box::new(s) as Box<dyn Read + Send>) + } + + fn take_stderr(&mut self) -> Option<Box<dyn Read + Send>> { + self.child + .as_mut()? + .stderr + .take() + .map(|s| Box::new(s) as Box<dyn Read + Send>) + } + + fn id(&self) -> u32 { + self.child.as_ref().expect("No child").id() + } + + fn wait(mut self: Box<Self>) -> Result<Output, ExecutionError> { + let output = + self.child.take().unwrap().wait_with_output().map_err(|err| { + ExecutionError::ExecutionStart { + command: self.command_str.clone(), + err, + } + })?; + + if !output.status.success() { + return Err(ExecutionError::from_output(self.command_str, &output)); + } + + Ok(output) + } +} diff --git a/helios/protostar/src/host.rs b/helios/protostar/src/host.rs new file mode 100644 index 00000000000..410fe9fa781 --- /dev/null +++ b/helios/protostar/src/host.rs @@ -0,0 +1,46 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ +use crate::executor::HostExecutor; +use crate::libc::RealLibc; +use crate::swapctl::RealSwapctl; +use helios_fusion::interfaces::{libc::Libc, swapctl::Swapctl}; +use helios_fusion::{Executor, Host, HostSystem}; +use slog::Logger; +use std::sync::Arc; + +struct RealHost { + executor: Arc<HostExecutor>, + + libc: RealLibc, + swapctl: RealSwapctl, +} + +impl RealHost { + pub fn new(log: Logger) -> Arc<Self> { + Arc::new(Self { + executor: HostExecutor::new(log), + libc: Default::default(), + swapctl: Default::default(), + }) + } + + pub fn as_host(self: Arc<Self>) -> HostSystem { + self + } +} + +impl Host for RealHost { + fn executor(&self) -> &dyn Executor { + &*self.executor + } + + fn libc(&self) -> &dyn Libc { + &self.libc + } + + fn swapctl(&self) -> &dyn Swapctl { + &self.swapctl + } +} diff --git a/helios/protostar/src/lib.rs b/helios/protostar/src/lib.rs index 5ad75e4f01c..2fe7f3d97ec 100644 --- a/helios/protostar/src/lib.rs +++ b/helios/protostar/src/lib.rs @@ -2,153 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! A "real" [Executor] implementation, which sends commands to the host. +mod executor; +mod host; +mod libc; +mod swapctl; -use helios_fusion::{ - log_input, log_output, AsCommandStr, BoxedChild, BoxedExecutor, Child, - ExecutionError, Executor, Input, Output, -}; - -use async_trait::async_trait; -use slog::{error, Logger}; -use std::io::{Read, Write}; -use std::process::{Command, Stdio}; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; - -/// Implements [Executor] by running commands against the host system. -pub struct HostExecutor { - log: slog::Logger, - counter: std::sync::atomic::AtomicU64, -} - -impl HostExecutor { - pub fn new(log: Logger) -> Arc<Self> { - Arc::new(Self { log, counter: AtomicU64::new(0) }) - } - - pub fn as_executor(self: Arc<Self>) -> BoxedExecutor { - self - } - - fn prepare(&self, command: &Command) -> u64 { - let id = self.counter.fetch_add(1, Ordering::SeqCst); - log_input(&self.log, id, command); - id - } - - fn finalize( - &self, - command: &Command, - id: u64, - output: Output, - ) -> Result<Output, ExecutionError> { - log_output(&self.log, id, &output); - if !output.status.success() { - return Err(ExecutionError::from_output(command, &output)); - } - Ok(output) - } -} - -#[async_trait] -impl Executor for HostExecutor { - async fn execute_async( - &self, - command: &mut tokio::process::Command, - ) -> Result<Output, ExecutionError> { - let id = self.prepare(command.as_std()); - let output = command.output().await.map_err(|err| { - error!(self.log, "Could not start program asynchronously!"; "id" => id); - ExecutionError::ExecutionStart { - command: Input::from(command.as_std()).to_string(), - err, - } - })?; - self.finalize(command.as_std(), id, output) - } - - fn execute(&self, command: &mut Command) -> Result<Output, ExecutionError> { - let id = self.prepare(command); - let output = command.output().map_err(|err| { - error!(self.log, "Could not start program!"; "id" => id); - ExecutionError::ExecutionStart { - command: Input::from(&*command).to_string(), - err, - } - })?; - self.finalize(command, id, output) - } - - fn spawn( - &self, - command: &mut Command, - ) -> Result<BoxedChild, ExecutionError> { - let command_str = (&*command).into_str(); - Ok(Box::new(SpawnedChild { - child: Some( - command - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn() - .map_err(|err| ExecutionError::ExecutionStart { - command: command_str.clone(), - err, - })?, - ), - command_str, - })) - } -} - -/// A real, host-controlled child process -pub struct SpawnedChild { -
command_str: String, - child: Option<std::process::Child>, -} - -impl Child for SpawnedChild { - fn take_stdin(&mut self) -> Option<Box<dyn Write + Send>> { - self.child - .as_mut()? - .stdin - .take() - .map(|s| Box::new(s) as Box<dyn Write + Send>) - } - - fn take_stdout(&mut self) -> Option<Box<dyn Read + Send>> { - self.child - .as_mut()? - .stdout - .take() - .map(|s| Box::new(s) as Box<dyn Read + Send>) - } - - fn take_stderr(&mut self) -> Option<Box<dyn Read + Send>> { - self.child - .as_mut()? - .stderr - .take() - .map(|s| Box::new(s) as Box<dyn Read + Send>) - } - - fn id(&self) -> u32 { - self.child.as_ref().expect("No child").id() - } - - fn wait(mut self: Box<Self>) -> Result<Output, ExecutionError> { - let output = - self.child.take().unwrap().wait_with_output().map_err(|err| { - ExecutionError::ExecutionStart { - command: self.command_str.clone(), - err, - } - })?; - - if !output.status.success() { - return Err(ExecutionError::from_output(self.command_str, &output)); - } - - Ok(output) - } -} +pub use executor::*; diff --git a/helios/protostar/src/libc.rs b/helios/protostar/src/libc.rs new file mode 100644 index 00000000000..ee78aa52b26 --- /dev/null +++ b/helios/protostar/src/libc.rs @@ -0,0 +1,20 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Miscellaneous FFI wrapper functions for libc + +use helios_fusion::interfaces::libc::Libc; + +#[derive(Default)] +pub struct RealLibc {} + +impl Libc for RealLibc { + fn sysconf(&self, arg: i32) -> std::io::Result<i64> { + let res = unsafe { libc::sysconf(arg) }; + if res == -1 { + return Err(std::io::Error::last_os_error()); + } + Ok(res) + } +} diff --git a/helios/protostar/src/swapctl.rs b/helios/protostar/src/swapctl.rs new file mode 100644 index 00000000000..6130a9c82b2 --- /dev/null +++ b/helios/protostar/src/swapctl.rs @@ -0,0 +1,224 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Operations for creating a system swap device. + +use helios_fusion::interfaces::swapctl::{Error, SwapDevice, Swapctl}; + +// swapctl(2) +#[cfg(target_os = "illumos")] +extern "C" { + fn swapctl(cmd: i32, arg: *mut libc::c_void) -> i32; +} + +// TODO: in the limit, we probably want to stub out all illumos-specific +// calls, and perhaps define an alternate version of this module for +// non-illumos targets. But currently, this code is only used by the real +// sled agent, and there is a fair amount of work there to make the real +// sled agent work on non-illumos targets. So for now, just stub out this +// piece.
+#[cfg(not(target_os = "illumos"))] +fn swapctl(_cmd: i32, _arg: *mut libc::c_void) -> i32 { + panic!("swapctl(2) only on illumos"); +} + +// swapctl(2) commands +const SC_ADD: i32 = 0x1; +const SC_LIST: i32 = 0x2; +#[allow(dead_code)] +const SC_REMOVE: i32 = 0x3; +const SC_GETNSWP: i32 = 0x4; + +// argument for SC_ADD and SC_REMOVE +#[repr(C)] +#[derive(Debug, Copy, Clone)] +struct swapres { + sr_name: *const libc::c_char, + sr_start: libc::off_t, + sr_length: libc::off_t, +} + +// argument for SC_LIST: swaptbl with an embedded array of swt_n swapents +#[repr(C)] +#[derive(Debug, Clone)] +struct swaptbl { + swt_n: i32, + swt_ent: [swapent; N_SWAPENTS], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +struct swapent { + ste_path: *const libc::c_char, + ste_start: libc::off_t, + ste_length: libc::off_t, + ste_pages: libc::c_long, + ste_free: libc::c_long, + ste_flags: libc::c_long, +} +impl Default for swapent { + fn default() -> Self { + Self { + ste_path: std::ptr::null_mut(), + ste_start: 0, + ste_length: 0, + ste_pages: 0, + ste_free: 0, + ste_flags: 0, + } + } +} + +/// The argument for SC_LIST (struct swaptbl) requires an embedded array in +/// the struct, with swt_n entries, each of which requires a pointer to store +/// the path to the device. +/// +/// Ideally, we would want to query the number of swap devices on the system +/// via SC_GETNSWP, allocate enough memory for each device entry, then pass +/// in pointers to memory to the list command. Unfortunately, creating a +/// generically large array embedded in a struct that can be passed to C is a +/// bit of a challenge in safe Rust. So instead, we just pick a reasonable +/// max number of devices to list. +/// +/// We pick a max of 3 devices, somewhat arbitrarily. We only ever expect to +/// see 0 or 1 swap device(s); if there are more, that is a bug. In the case +/// that we see more than 1 swap device, we log a warning, and eventually, we +/// should send an ereport. +const N_SWAPENTS: usize = 3; + +/// Wrapper around swapctl(2) call. All commands except SC_GETNSWP require an +/// argument, hence `data` being an optional parameter. +unsafe fn swapctl_cmd<T>( + cmd: i32, + data: Option<std::ptr::NonNull<T>>, +) -> std::io::Result<u32> { + assert!(cmd >= SC_ADD && cmd <= SC_GETNSWP, "invalid swapctl cmd: {cmd}"); + + let ptr = match data { + Some(v) => v.as_ptr() as *mut libc::c_void, + None => std::ptr::null_mut(), + }; + + let res = swapctl(cmd, ptr); + if res == -1 { + return Err(std::io::Error::last_os_error()); + } + + Ok(res as u32) +} + +#[derive(Default)] +pub(crate) struct RealSwapctl {} + +impl Swapctl for RealSwapctl { + /// List swap devices on the system. + fn list_swap_devices(&self) -> Result<Vec<SwapDevice>, Error> { + // Each swapent requires a char * pointer in our control for the + // `ste_path` field, which the kernel will fill in with a path if there + // is a swap device for that entry. Because these pointers are mutated + // by the kernel, we mark them as mutable. (Note that the compiler will + // happily accept these definitions as non-mutable, since it can't know + // what happens to the pointers on the C side, but not marking them as + // mutable when they may in fact be mutated is undefined behavior). + // + // Per limits.h(3HEAD), PATH_MAX is the max number of bytes in a path + // name, including the null terminating character, so these buffers + // have sufficient space for paths on the system.
+ const MAXPATHLEN: usize = libc::PATH_MAX as usize; + let mut p1 = [0i8; MAXPATHLEN]; + let mut p2 = [0i8; MAXPATHLEN]; + let mut p3 = [0i8; MAXPATHLEN]; + let entries: [swapent; N_SWAPENTS] = [ + swapent { + ste_path: &mut p1 as *mut libc::c_char, + ..Default::default() + }, + swapent { + ste_path: &mut p2 as *mut libc::c_char, + ..Default::default() + }, + swapent { + ste_path: &mut p3 as *mut libc::c_char, + ..Default::default() + }, + ]; + + let mut list_req = + swaptbl { swt_n: N_SWAPENTS as i32, swt_ent: entries }; + // Unwrap safety: We know this isn't null because we just created it + let ptr = std::ptr::NonNull::new(&mut list_req).unwrap(); + let n_devices = unsafe { + swapctl_cmd(SC_LIST, Some(ptr)) + .map_err(|e| Error::ListDevices(e.to_string()))? + }; + + let mut devices = Vec::with_capacity(n_devices as usize); + for i in 0..n_devices as usize { + let e = list_req.swt_ent[i]; + + // Safety: CStr::from_ptr is documented as safe if: + // 1. The pointer contains a valid null terminator at the end of + // the string + // 2. The pointer is valid for reads of bytes up to and including + // the null terminator + // 3. The memory referenced by the return CStr is not mutated for + // the duration of lifetime 'a + // + // (1) is true because we initialize the buffers for ste_path as all + // 0s, and their length is long enough to include the null + // terminator for all paths on the system. + // (2) should be guaranteed by the syscall itself, and we can know + // how many entries are valid via its return value. + // (3) We aren't currently mutating the memory referenced by the + // CStr, though there's nothing here enforcing that. + let p = unsafe { std::ffi::CStr::from_ptr(e.ste_path) }; + let path = String::from_utf8_lossy(p.to_bytes()).to_string(); + + devices.push(SwapDevice { + path: path, + start: e.ste_start as u64, + length: e.ste_length as u64, + total_pages: e.ste_pages as u64, + free_pages: e.ste_free as u64, + flags: e.ste_flags, + }); + } + + Ok(devices) + } + + fn add_swap_device( + &self, + path: String, + start: u64, + length: u64, + ) -> Result<(), Error> { + let path_cp = path.clone(); + let name = + std::ffi::CString::new(path).map_err(|e| Error::AddDevice { + msg: format!("could not convert path to CString: {}", e,), + path: path_cp.clone(), + start: start, + length: length, + })?; + + let mut add_req = swapres { + sr_name: name.as_ptr(), + sr_start: start as i64, + sr_length: length as i64, + }; + // Unwrap safety: We know this isn't null because we just created it + let ptr = std::ptr::NonNull::new(&mut add_req).unwrap(); + let res = unsafe { + swapctl_cmd(SC_ADD, Some(ptr)).map_err(|e| Error::AddDevice { + msg: e.to_string(), + path: path_cp, + start: start, + length: length, + })? 
+ }; + assert_eq!(res, 0); + + Ok(()) + } +} diff --git a/helios/tokamak/Cargo.toml b/helios/tokamak/Cargo.toml index c09c7e37c8c..1e8db3d4fa7 100644 --- a/helios/tokamak/Cargo.toml +++ b/helios/tokamak/Cargo.toml @@ -8,18 +8,27 @@ license = "MPL-2.0" [dependencies] anyhow.workspace = true async-trait.workspace = true +bitflags.workspace = true camino.workspace = true +clap.workspace = true cfg-if.workspace = true futures.workspace = true helios-fusion.workspace = true itertools.workspace = true +ipnetwork.workspace = true libc.workspace = true omicron-common.workspace = true +once_cell.workspace = true +petgraph.workspace = true +rustyline.workspace = true schemars.workspace = true serde.workspace = true shlex.workspace = true slog.workspace = true +slog-async.workspace = true +slog-term.workspace = true smf.workspace = true +strum.workspace = true thiserror.workspace = true tokio.workspace = true uuid.workspace = true diff --git a/helios/tokamak/src/bin/tokomaksh.rs b/helios/tokamak/src/bin/tokomaksh.rs new file mode 100644 index 00000000000..3560284e4af --- /dev/null +++ b/helios/tokamak/src/bin/tokomaksh.rs @@ -0,0 +1,144 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A shell-based interface to tokamak + +use anyhow::anyhow; +use camino::Utf8PathBuf; +use clap::{Parser, ValueEnum}; +use helios_fusion::Host; +use helios_fusion::Input; +use helios_tokamak::FakeHost; +use slog::Drain; +use slog::Level; +use slog::LevelFilter; +use slog::Logger; +use slog_term::FullFormat; +use slog_term::TermDecorator; +use std::process::Command; + +#[derive(Clone, Debug, ValueEnum)] +#[clap(rename_all = "kebab_case")] +enum MachineMode { + /// The machine exists with no hardware + Empty, + /// The machine is pre-populated with some disk devices + Disks, + /// The machine is populated with a variety of running interfaces. + Populate, +} + +fn parse_log_level(s: &str) -> anyhow::Result<Level> { + s.parse().map_err(|_| anyhow!("Invalid log level")) } + +#[derive(Debug, Parser)] +struct Args { + /// Describes how to pre-populate the fake machine + #[clap(long = "machine-mode", default_value = "empty")] + machine_mode: MachineMode, + + /// The log level for the command.
+ #[arg(long, value_parser = parse_log_level, default_value_t = Level::Warning)] + log_level: Level, +} + +fn run_command_during_setup(host: &FakeHost, command: &mut Command) { + println!("[POPULATING] $ {}", Input::from(&*command)); + let output = host.executor().execute(command).expect("Failed during setup"); + + print!("{}", String::from_utf8_lossy(&output.stdout)); + eprint!("{}", String::from_utf8_lossy(&output.stderr)); +} + +#[tokio::main] +async fn main() -> Result<(), anyhow::Error> { + let args = Args::parse(); + + let decorator = TermDecorator::new().build(); + let drain = FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + let drain = LevelFilter::new(drain, args.log_level).fuse(); + let log = Logger::root(drain, slog::o!("unit" => "zone-bundle")); + + let config = rustyline::Config::builder().auto_add_history(true).build(); + let mut rl = rustyline::Editor::<(), _>::with_history( + config, + rustyline::history::MemHistory::new(), + )?; + + let host = FakeHost::new(log); + + let add_vdevs = || { + let vdevs = vec![ + Utf8PathBuf::from("/unreal/block/a"), + Utf8PathBuf::from("/unreal/block/b"), + Utf8PathBuf::from("/unreal/block/c"), + ]; + + for vdev in &vdevs { + println!("[POPULATING] Adding virtual device: {vdev}"); + } + + host.add_devices(&vdevs); + }; + + match args.machine_mode { + MachineMode::Empty => (), + MachineMode::Disks => { + add_vdevs(); + } + MachineMode::Populate => { + add_vdevs(); + run_command_during_setup( + &host, + Command::new(helios_fusion::ZPOOL).args([ + "create", + "oxp_2f11d4e8-fa31-4230-a781-e800a51404e7", + "/unreal/block/a", + ]), + ); + run_command_during_setup( + &host, + Command::new(helios_fusion::ZFS) + .args(["create", "oxp_2f11d4e8-fa31-4230-a781-e800a51404e7/nested_filesystem"]) + ); + } + } + + const DEFAULT: &str = "🍩 "; + const OK: &str = "✅ "; + const ERR: &str = "❌ "; + let mut prompt = DEFAULT; + + while let Ok(line) = rl.readline(prompt) { + let Some(args) = shlex::split(&line) else { + eprintln!("Couldn't parse that, try again "); + continue; + }; + if args.is_empty() { + prompt = DEFAULT; + continue; + } + let program = helios_fusion::which_binary(&args[0]); + let mut cmd = tokio::process::Command::new(program); + cmd.args(&args[1..]); + match host.executor().execute_async(&mut cmd).await { + Ok(output) => { + print!("{}", String::from_utf8_lossy(&output.stdout)); + prompt = OK; + } + Err(err) => { + match err { + helios_fusion::ExecutionError::CommandFailure(info) => { + eprintln!("{}", info.stderr); + } + _ => eprintln!("{}", err), + } + prompt = ERR; + } + } + } + Ok(()) +} diff --git a/helios/tokamak/src/cli/dladm.rs b/helios/tokamak/src/cli/dladm.rs new file mode 100644 index 00000000000..46bcf8e55e8 --- /dev/null +++ b/helios/tokamak/src/cli/dladm.rs @@ -0,0 +1,538 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use crate::cli::parse::InputParser; +use crate::host::LinkName; + +use helios_fusion::Input; +use helios_fusion::DLADM; +use omicron_common::vlan::VlanID; +use std::collections::HashMap; +use std::str::FromStr; + +#[derive(Debug)] +pub(crate) enum Command { + CreateVnic { + link: LinkName, + temporary: bool, + mac: Option, + vlan: Option, + name: LinkName, + properties: HashMap, + }, + CreateEtherstub { + temporary: bool, + name: LinkName, + }, + DeleteEtherstub { + temporary: bool, + name: LinkName, + }, + DeleteVnic { + temporary: bool, + name: LinkName, + }, + ShowEtherstub { + name: Option, + }, + ShowLink { + name: LinkName, + fields: Vec, + }, + ShowPhys { + mac: bool, + fields: Vec, + name: Option, + }, + ShowVnic { + fields: Option>, + name: Option, + }, + SetLinkprop { + temporary: bool, + properties: HashMap, + name: LinkName, + }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != DLADM { + return Err(format!("Not dladm command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + match input.shift_arg()?.as_str() { + "create-vnic" => { + let mut link = None; + let mut temporary = false; + let mut mac = None; + let mut vlan = None; + let mut properties = HashMap::new(); + let name = LinkName(input.shift_last_arg()?); + + while !input.args().is_empty() { + if input.shift_arg_if("-t")? { + temporary = true; + } else if input.shift_arg_if("-p")? { + let props = input.shift_arg()?; + let props = props.split(','); + for prop in props { + let (k, v) = + prop.split_once('=').ok_or_else(|| { + format!("Bad property: {prop}") + })?; + properties.insert(k.to_string(), v.to_string()); + } + } else if input.shift_arg_if("-m")? { + // NOTE: Not yet supporting the keyword-based MACs. + mac = Some(input.shift_arg()?); + } else if input.shift_arg_if("-l")? { + link = Some(LinkName(input.shift_arg()?)); + } else if input.shift_arg_if("-v")? { + vlan = Some( + VlanID::from_str(&input.shift_arg()?) + .map_err(|e| e.to_string())?, + ); + } else { + return Err(format!( + "Invalid arguments {}", + input.input() + )); + } + } + + Ok(Self::CreateVnic { + link: link.ok_or_else(|| "Missing link")?, + temporary, + mac, + vlan, + name, + properties, + }) + } + "create-etherstub" => { + let mut temporary = false; + let name = LinkName(input.shift_last_arg()?); + while !input.args().is_empty() { + if input.shift_arg_if("-t")? { + temporary = true; + } else { + return Err(format!( + "Invalid arguments {}", + input.input() + )); + } + } + Ok(Self::CreateEtherstub { temporary, name }) + } + "delete-etherstub" => { + let mut temporary = false; + let name = LinkName(input.shift_last_arg()?); + while !input.args().is_empty() { + if input.shift_arg_if("-t")? { + temporary = true; + } else { + return Err(format!( + "Invalid arguments {}", + input.input() + )); + } + } + Ok(Self::DeleteEtherstub { temporary, name }) + } + "delete-vnic" => { + let mut temporary = false; + let name = LinkName(input.shift_last_arg()?); + while !input.args().is_empty() { + if input.shift_arg_if("-t")? { + temporary = true; + } else { + return Err(format!( + "Invalid arguments {}", + input.input() + )); + } + } + Ok(Self::DeleteVnic { temporary, name }) + } + "show-etherstub" => { + let name = input.shift_last_arg().map(|s| LinkName(s)).ok(); + input.no_args_remaining()?; + Ok(Self::ShowEtherstub { name }) + } + "show-link" => { + let name = LinkName(input.shift_last_arg()?); + if !input.shift_arg_if("-p")? 
{ + return Err( + "You should ask for parsable output ('-p')".into() + ); + } + if !input.shift_arg_if("-o")? { + return Err( + "You should ask for specific outputs ('-o')".into() + ); + } + let fields = input + .shift_arg()? + .split(',') + .map(|s| s.to_string()) + .collect(); + input.no_args_remaining()?; + + Ok(Self::ShowLink { name, fields }) + } + "show-phys" => { + let mut mac = false; + if input.shift_arg_if("-m")? { + mac = true; + } + if !input.shift_arg_if("-p")? { + return Err( + "You should ask for parsable output ('-p')".into() + ); + } + if !input.shift_arg_if("-o")? { + return Err( + "You should ask for specific outputs ('-o')".into() + ); + } + let fields = input + .shift_arg()? + .split(',') + .map(|s| s.to_string()) + .collect(); + let name = input.shift_arg().map(|s| LinkName(s)).ok(); + input.no_args_remaining()?; + + Ok(Self::ShowPhys { mac, fields, name }) + } + "show-vnic" => { + let mut fields = None; + if input.shift_arg_if("-p")? { + if !input.shift_arg_if("-o")? { + return Err( + "You should ask for specific outputs ('-o')".into(), + ); + } + fields = Some( + input + .shift_arg()? + .split(',') + .map(|s| s.to_string()) + .collect(), + ); + } + + let name = input.shift_arg().map(|s| LinkName(s)).ok(); + input.no_args_remaining()?; + Ok(Self::ShowVnic { fields, name }) + } + "set-linkprop" => { + let mut temporary = false; + let mut properties = HashMap::new(); + let name = LinkName(input.shift_last_arg()?); + + while !input.args().is_empty() { + if input.shift_arg_if("-t")? { + temporary = true; + } else if input.shift_arg_if("-p")? { + let props = input.shift_arg()?; + let props = props.split(','); + for prop in props { + let (k, v) = + prop.split_once('=').ok_or_else(|| { + format!("Bad property: {prop}") + })?; + properties.insert(k.to_string(), v.to_string()); + } + } else { + return Err(format!( + "Invalid arguments {}", + input.input() + )); + } + } + + if properties.is_empty() { + return Err("Missing properties".into()); + } + + Ok(Self::SetLinkprop { temporary, properties, name }) + } + command => Err(format!("Unsupported command: {}", command)), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn create_vnic() { + // Valid usage + let Command::CreateVnic { link, temporary, mac, vlan, name, properties } = Command::try_from( + Input::shell(format!("{DLADM} create-vnic -t -l mylink newlink")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(link.0, "mylink"); + assert!(temporary); + assert!(mac.is_none()); + assert!(vlan.is_none()); + assert_eq!(name.0, "newlink"); + assert!(properties.is_empty()); + + // Valid usage + let Command::CreateVnic { link, temporary, mac, vlan, name, properties } = Command::try_from( + Input::shell(format!("{DLADM} create-vnic -l mylink -v 3 -m foobar -p mtu=123 newlink")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(link.0, "mylink"); + assert!(!temporary); + assert_eq!(mac.unwrap(), "foobar"); + assert_eq!(vlan.unwrap(), VlanID::new(3).unwrap()); + assert_eq!(name.0, "newlink"); + assert_eq!( + properties, + HashMap::from([("mtu".to_string(), "123".to_string())]) + ); + + // Missing link + Command::try_from(Input::shell(format!("{DLADM} create-vnic newlink"))) + .unwrap_err(); + + // Missing name + Command::try_from(Input::shell(format!( + "{DLADM} create-vnic -l mylink" + ))) + .unwrap_err(); + + // Bad properties + Command::try_from(Input::shell(format!( + "{DLADM} create-vnic -l mylink -p foo=bar,baz mylink" + ))) + .unwrap_err(); + + // Unknown argument + 
Command::try_from(Input::shell(format!( + "{DLADM} create-vnic -l mylink --splorch mylink" + ))) + .unwrap_err(); + + // Missing command + Command::try_from(Input::shell(DLADM)).unwrap_err(); + + // Not dladm + Command::try_from(Input::shell("hello!")).unwrap_err(); + } + + #[test] + fn create_etherstub() { + // Valid usage + let Command::CreateEtherstub { temporary, name } = Command::try_from( + Input::shell(format!("{DLADM} create-etherstub -t newlink")) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert!(temporary); + assert_eq!(name.0, "newlink"); + + // Missing link + Command::try_from(Input::shell(format!("{DLADM} create-etherstub"))) + .unwrap_err(); + + // Invalid argument + Command::try_from(Input::shell(format!( + "{DLADM} create-etherstub --splorch mylink" + ))) + .unwrap_err(); + } + + #[test] + fn delete_etherstub() { + // Valid usage + let Command::DeleteEtherstub { temporary, name } = Command::try_from( + Input::shell(format!("{DLADM} delete-etherstub -t newlink")) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert!(temporary); + assert_eq!(name.0, "newlink"); + + // Missing link + Command::try_from(Input::shell(format!("{DLADM} delete-etherstub"))) + .unwrap_err(); + + // Invalid argument + Command::try_from(Input::shell(format!( + "{DLADM} delete-etherstub --splorch mylink" + ))) + .unwrap_err(); + } + + #[test] + fn delete_vnic() { + // Valid usage + let Command::DeleteVnic { temporary, name } = Command::try_from( + Input::shell(format!("{DLADM} delete-vnic -t newlink")) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert!(temporary); + assert_eq!(name.0, "newlink"); + + // Missing link + Command::try_from(Input::shell(format!("{DLADM} delete-vnic"))) + .unwrap_err(); + + // Invalid argument + Command::try_from(Input::shell(format!( + "{DLADM} delete-vnic --splorch mylink" + ))) + .unwrap_err(); + } + + #[test] + fn show_etherstub() { + // Valid usage + let Command::ShowEtherstub { name } = Command::try_from( + Input::shell(format!("{DLADM} show-etherstub newlink")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(name.unwrap().0, "newlink"); + + // Valid usage + let Command::ShowEtherstub { name } = Command::try_from( + Input::shell(format!("{DLADM} show-etherstub")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert!(name.is_none()); + + // Invalid argument + Command::try_from(Input::shell(format!( + "{DLADM} show-etherstub --splorch mylink" + ))) + .unwrap_err(); + } + + #[test] + fn show_link() { + // Valid usage + let Command::ShowLink { name, fields } = Command::try_from( + Input::shell(format!("{DLADM} show-link -p -o LINK,STATE newlink")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(name.0, "newlink"); + assert_eq!(fields[0], "LINK"); + assert_eq!(fields[1], "STATE"); + + // Missing link name + Command::try_from(Input::shell(format!("{DLADM} show-link"))) + .unwrap_err(); + + // Not asking for output + Command::try_from(Input::shell(format!("{DLADM} show-link mylink"))) + .unwrap_err(); + + // Not asking for parsable output + Command::try_from(Input::shell(format!( + "{DLADM} show-link -o LINK mylink" + ))) + .unwrap_err(); + } + + #[test] + fn show_phys() { + // Valid usage + let Command::ShowPhys{ mac, fields, name } = Command::try_from( + Input::shell(format!("{DLADM} show-phys -p -o LINK")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert!(!mac); + assert_eq!(fields[0], "LINK"); + assert!(name.is_none()); + + // Not asking for output + 
Command::try_from(Input::shell(format!("{DLADM} show-phys mylink"))) + .unwrap_err(); + + // Not asking for parsable output + Command::try_from(Input::shell(format!( + "{DLADM} show-phys -o LINK mylink" + ))) + .unwrap_err(); + } + + #[test] + fn show_vnic() { + // Valid usage + let Command::ShowVnic{ fields, name } = Command::try_from( + Input::shell(format!("{DLADM} show-vnic -p -o LINK")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(fields.unwrap(), vec!["LINK"]); + assert!(name.is_none()); + + // Valid usage + let Command::ShowVnic{ fields, name } = Command::try_from( + Input::shell(format!("{DLADM} show-vnic mylink")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert!(fields.is_none()); + assert_eq!(name.unwrap().0, "mylink"); + + // Not asking for parsable output + Command::try_from(Input::shell(format!( + "{DLADM} show-vnic -o LINK mylink" + ))) + .unwrap_err(); + } + + #[test] + fn set_linkprop() { + // Valid usage + let Command::SetLinkprop { temporary, properties, name } = Command::try_from( + Input::shell(format!("{DLADM} set-linkprop -t -p mtu=123 mylink")) + ).unwrap() else { + panic!("Wrong command"); + }; + assert!(temporary); + assert_eq!( + properties, + HashMap::from([("mtu".to_string(), "123".to_string())]) + ); + assert_eq!(name.0, "mylink"); + + // Missing properties + Command::try_from(Input::shell(format!("{DLADM} set-linkprop mylink"))) + .unwrap_err(); + + // Bad property + Command::try_from(Input::shell(format!( + "{DLADM} set-linkprop -p bar mylink" + ))) + .unwrap_err(); + + // Missing link + Command::try_from(Input::shell(format!( + "{DLADM} set-linkprop -p foo=bar" + ))) + .unwrap_err(); + } +} diff --git a/helios/tokamak/src/cli/ipadm.rs b/helios/tokamak/src/cli/ipadm.rs new file mode 100644 index 00000000000..95f8dbd7f5d --- /dev/null +++ b/helios/tokamak/src/cli/ipadm.rs @@ -0,0 +1,347 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
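The ipadm parser below follows the same pattern; the `-T static -a <prefix>` form carries an `IpNetwork` inside `AddrType::Static`. A hedged sketch, assuming this module's imports (`AddrType`, `AddrObject`, `IPADM`) are in scope:

// Sketch only: a static address assignment parsed into the typed command.
fn drive_ipadm_parser() -> Result<(), String> {
    let input = Input::shell(format!(
        "{IPADM} create-addr -T static -a 192.168.1.5/24 foo/bar"
    ));
    let Command::CreateAddr { temporary, ty, addrobj } =
        Command::try_from(input)?
    else {
        return Err("unexpected ipadm subcommand".to_string());
    };
    assert!(!temporary);
    assert!(matches!(ty, AddrType::Static(_)));
    assert_eq!(addrobj.to_string(), "foo/bar");
    Ok(())
}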
+ +use crate::cli::parse::InputParser; +use crate::host::{AddrType, IpInterfaceName}; + +use helios_fusion::addrobj::AddrObject; +use helios_fusion::Input; +use helios_fusion::IPADM; +use ipnetwork::IpNetwork; +use std::collections::HashMap; +use std::str::FromStr; + +pub(crate) enum Command { + CreateAddr { + temporary: bool, + ty: AddrType, + addrobj: AddrObject, + }, + CreateIf { + temporary: bool, + name: IpInterfaceName, + }, + DeleteAddr { + addrobj: AddrObject, + }, + DeleteIf { + name: IpInterfaceName, + }, + ShowIf { + properties: Vec, + name: IpInterfaceName, + }, + SetIfprop { + temporary: bool, + properties: HashMap, + module: String, + name: IpInterfaceName, + }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != IPADM { + return Err(format!("Not ipadm command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + match input.shift_arg()?.as_str() { + "create-addr" => { + let temporary = input.shift_arg_if("-t")?; + input.shift_arg_expect("-T")?; + + let ty = match input.shift_arg()?.as_str() { + "static" => { + input.shift_arg_expect("-a")?; + let addr = input.shift_arg()?; + AddrType::Static( + IpNetwork::from_str(&addr) + .map_err(|e| e.to_string())?, + ) + } + "dhcp" => AddrType::Dhcp, + "addrconf" => AddrType::Addrconf, + ty => return Err(format!("Unknown address type {ty}")), + }; + let addrobj = AddrObject::from_str(&input.shift_arg()?) + .map_err(|e| e.to_string())?; + input.no_args_remaining()?; + Ok(Command::CreateAddr { temporary, ty, addrobj }) + } + "create-ip" | "create-if" => { + let temporary = input.shift_arg_if("-t")?; + let name = IpInterfaceName(input.shift_arg()?); + input.no_args_remaining()?; + Ok(Command::CreateIf { temporary, name }) + } + "delete-addr" => { + let addrobj = AddrObject::from_str(&input.shift_arg()?) + .map_err(|e| e.to_string())?; + input.no_args_remaining()?; + Ok(Command::DeleteAddr { addrobj }) + } + "delete-ip" | "delete-if" => { + let name = IpInterfaceName(input.shift_arg()?); + input.no_args_remaining()?; + Ok(Command::DeleteIf { name }) + } + "show-if" => { + let name = IpInterfaceName(input.shift_last_arg()?); + let mut properties = vec![]; + while !input.args().is_empty() { + if input.shift_arg_if("-p")? { + input.shift_arg_expect("-o")?; + properties = input + .shift_arg()? + .split(',') + .map(|s| s.to_string()) + .collect(); + } else { + return Err(format!( + "Unexpected input: {}", + input.input() + )); + } + } + + Ok(Command::ShowIf { properties, name }) + } + "set-ifprop" => { + let name = IpInterfaceName(input.shift_last_arg()?); + + let mut temporary = false; + let mut properties = HashMap::new(); + let mut module = "ip".to_string(); + + while !input.args().is_empty() { + if input.shift_arg_if("-t")? { + temporary = true; + } else if input.shift_arg_if("-m")? { + module = input.shift_arg()?; + } else if input.shift_arg_if("-p")? 
{ + let props = input.shift_arg()?; + let props = props.split(','); + for prop in props { + let (k, v) = + prop.split_once('=').ok_or_else(|| { + format!("Bad property: {prop}") + })?; + properties.insert(k.to_string(), v.to_string()); + } + } else { + return Err(format!( + "Unexpected input: {}", + input.input() + )); + } + } + + Ok(Command::SetIfprop { temporary, properties, module, name }) + } + command => return Err(format!("Unexpected command: {command}")), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn create_addr() { + // Valid command + let Command::CreateAddr { temporary, ty, addrobj } = Command::try_from( + Input::shell(format!("{IPADM} create-addr -t -T addrconf foo/bar")) + ).unwrap() else { + panic!("Wrong command") + }; + assert!(temporary); + assert!(matches!(ty, AddrType::Addrconf)); + assert_eq!("foo/bar", addrobj.to_string()); + + // Valid command + let Command::CreateAddr { temporary, ty, addrobj } = Command::try_from( + Input::shell(format!("{IPADM} create-addr -T static -a ::/32 foo/bar")) + ).unwrap() else { + panic!("Wrong command") + }; + assert!(!temporary); + assert_eq!(ty, AddrType::Static(IpNetwork::from_str("::/32").unwrap())); + assert_eq!("foo/bar", addrobj.to_string()); + + // Bad type + assert!(Command::try_from(Input::shell(format!( + "{IPADM} create-addr -T quadratric foo/bar" + ))) + .err() + .unwrap() + .contains("Unknown address type")); + + // Missing name + assert!(Command::try_from(Input::shell(format!( + "{IPADM} create-addr -T dhcp" + ))) + .err() + .unwrap() + .contains("Missing argument")); + + // Too many arguments + assert!(Command::try_from(Input::shell(format!( + "{IPADM} create-addr -T dhcp foo/bar baz" + ))) + .err() + .unwrap() + .contains("Unexpected extra arguments")); + + // Not addrobject + assert!(Command::try_from(Input::shell(format!( + "{IPADM} create-addr -T dhcp foobar" + ))) + .err() + .unwrap() + .contains("Failed to parse addrobj name")); + } + + #[test] + fn create_if() { + // Valid command + let Command::CreateIf { temporary, name } = Command::try_from( + Input::shell(format!("{IPADM} create-if foobar")) + ).unwrap() else { + panic!("Wrong command") + }; + assert!(!temporary); + assert_eq!(name.0, "foobar"); + + // Too many arguments + assert!(Command::try_from(Input::shell(format!( + "{IPADM} create-if foo bar" + ))) + .err() + .unwrap() + .contains("Unexpected extra arguments")); + } + + #[test] + fn delete_addr() { + // Valid command + let Command::DeleteAddr { addrobj } = Command::try_from( + Input::shell(format!("{IPADM} delete-addr foo/bar")) + ).unwrap() else { + panic!("Wrong command") + }; + assert_eq!(addrobj.to_string(), "foo/bar"); + + // Not addrobject + assert!(Command::try_from(Input::shell(format!( + "{IPADM} delete-addr foobar" + ))) + .err() + .unwrap() + .contains("Failed to parse addrobj name")); + + // Too many arguments + assert!(Command::try_from(Input::shell(format!( + "{IPADM} delete-addr foo/bar foo/bar" + ))) + .err() + .unwrap() + .contains("Unexpected extra arguments")); + } + + #[test] + fn delete_if() { + // Valid command + let Command::DeleteIf { name } = Command::try_from( + Input::shell(format!("{IPADM} delete-if foobar")) + ).unwrap() else { + panic!("Wrong command") + }; + assert_eq!(name.0, "foobar"); + + // Too many arguments + assert!(Command::try_from(Input::shell(format!( + "{IPADM} delete-if foo bar" + ))) + .err() + .unwrap() + .contains("Unexpected extra arguments")); + } + + #[test] + fn show_if() { + // Valid command + let Command::ShowIf { properties, 
name } = Command::try_from( + Input::shell(format!("{IPADM} show-if foobar")) + ).unwrap() else { + panic!("Wrong command") + }; + assert!(properties.is_empty()); + assert_eq!(name.0, "foobar"); + + // Valid command + let Command::ShowIf { properties, name } = Command::try_from( + Input::shell(format!("{IPADM} show-if -p -o IFNAME foobar")) + ).unwrap() else { + panic!("Wrong command") + }; + assert_eq!(properties[0], "IFNAME"); + assert_eq!(name.0, "foobar"); + + // Non parsable output + Command::try_from(Input::shell(format!( + "{IPADM} show-if -o IFNAME foobar" + ))) + .err() + .unwrap(); + + // Not asking for specific field + Command::try_from(Input::shell(format!("{IPADM} show-if -p foobar"))) + .err() + .unwrap(); + + // Too many arguments + assert!(Command::try_from(Input::shell(format!( + "{IPADM} show-if fizz buzz" + ))) + .err() + .unwrap() + .contains("Unexpected input")); + } + + #[test] + fn set_ifprop() { + // Valid command + let Command::SetIfprop { temporary, properties, module, name } = Command::try_from( + Input::shell(format!("{IPADM} set-ifprop -t -m ipv4 -p mtu=123 foo")) + ).unwrap() else { + panic!("Wrong command") + }; + + assert!(temporary); + assert_eq!(properties["mtu"], "123"); + assert_eq!(module, "ipv4"); + assert_eq!(name.0, "foo"); + + // Bad property + assert!(Command::try_from(Input::shell(format!( + "{IPADM} set-ifprop -p blarg foo" + ))) + .err() + .unwrap() + .contains("Bad property: blarg")); + + // Too many arguments + assert!(Command::try_from(Input::shell(format!( + "{IPADM} set-ifprop -p mtu=123 foo bar" + ))) + .err() + .unwrap() + .contains("Unexpected input")); + } +} diff --git a/helios/tokamak/src/cli/mod.rs b/helios/tokamak/src/cli/mod.rs new file mode 100644 index 00000000000..f75c144bda4 --- /dev/null +++ b/helios/tokamak/src/cli/mod.rs @@ -0,0 +1,109 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Parsing of CLI-based arguments to a Helios system + +// TODO: REMOVE +#![allow(dead_code)] + +use crate::host::ZoneName; + +use helios_fusion::Input; +use helios_fusion::{ + DLADM, IPADM, PFEXEC, ROUTE, SVCADM, SVCCFG, ZFS, ZLOGIN, ZONEADM, ZONECFG, + ZPOOL, +}; + +// Command-line utilities +pub(crate) mod dladm; +pub(crate) mod ipadm; +pub(crate) mod route; +pub(crate) mod svcadm; +pub(crate) mod svccfg; +pub(crate) mod zfs; +pub(crate) mod zoneadm; +pub(crate) mod zonecfg; +pub(crate) mod zpool; + +// Utilities for parsing +mod parse; + +use crate::cli::parse::InputExt; + +pub(crate) enum KnownCommand { + Coreadm, // TODO + Dladm(dladm::Command), + Dumpadm, // TODO + Ipadm(ipadm::Command), + Fstyp, // TODO + RouteAdm, // TODO + Route(route::Command), + Savecore, // TODO + Svccfg(svccfg::Command), + Svcadm(svcadm::Command), + Zfs(zfs::Command), + Zoneadm(zoneadm::Command), + Zonecfg(zonecfg::Command), + Zpool(zpool::Command), +} + +pub(crate) struct Command { + with_pfexec: bool, + in_zone: Option, + cmd: KnownCommand, +} + +impl Command { + pub fn with_pfexec(&self) -> bool { + self.with_pfexec + } + pub fn in_zone(&self) -> &Option { + &self.in_zone + } + pub fn cmd(&self) -> &KnownCommand { + &self.cmd + } + pub fn as_cmd(self) -> KnownCommand { + self.cmd + } +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(mut input: Input) -> Result { + let mut with_pfexec = false; + let mut in_zone = None; + + while input.program == PFEXEC { + with_pfexec = true; + input.shift_program()?; + } + if input.program == ZLOGIN { + input + .shift_program() + .map_err(|_| "Missing zone name".to_string())?; + in_zone = + Some(ZoneName(input.shift_program().map_err(|_| { + "Missing command to run in zone".to_string() + })?)); + } + + use KnownCommand::*; + let cmd = match input.program.as_str() { + DLADM => Dladm(dladm::Command::try_from(input)?), + IPADM => Ipadm(ipadm::Command::try_from(input)?), + ROUTE => Route(route::Command::try_from(input)?), + SVCCFG => Svccfg(svccfg::Command::try_from(input)?), + SVCADM => Svcadm(svcadm::Command::try_from(input)?), + ZFS => Zfs(zfs::Command::try_from(input)?), + ZONEADM => Zoneadm(zoneadm::Command::try_from(input)?), + ZONECFG => Zonecfg(zonecfg::Command::try_from(input)?), + ZPOOL => Zpool(zpool::Command::try_from(input)?), + _ => return Err(format!("Unknown command: {}", input.program)), + }; + + Ok(Command { with_pfexec, in_zone, cmd }) + } +} diff --git a/helios/tokamak/src/cli/parse.rs b/helios/tokamak/src/cli/parse.rs new file mode 100644 index 00000000000..b688d5e9101 --- /dev/null +++ b/helios/tokamak/src/cli/parse.rs @@ -0,0 +1,113 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use helios_fusion::Input; + +pub(crate) trait InputExt { + /// Shifts out the program, putting the subsequent argument in its place. + /// + /// Returns the prior program value. 
+    fn shift_program(&mut self) -> Result<String, String>;
+}
+
+impl InputExt for Input {
+    fn shift_program(&mut self) -> Result<String, String> {
+        if self.args.is_empty() {
+            return Err(format!(
+                "Failed to parse {self}, expected more arguments"
+            ));
+        }
+        let new = self.args.remove(0);
+        let new = helios_fusion::which_binary(&new).to_string();
+        let old = std::mem::replace(&mut self.program, new);
+        Ok(old)
+    }
+}
+
+pub(crate) struct InputParser {
+    input: Input,
+    start: usize,
+    end: usize,
+}
+
+impl InputParser {
+    pub(crate) fn new(input: Input) -> Self {
+        let end = input.args.len();
+        Self { input, start: 0, end }
+    }
+
+    pub(crate) fn input(&self) -> &Input {
+        &self.input
+    }
+
+    pub(crate) fn args(&self) -> &[String] {
+        &self.input.args[self.start..self.end]
+    }
+
+    pub(crate) fn no_args_remaining(&self) -> Result<(), String> {
+        if self.start < self.end {
+            return Err(format!(
+                "Unexpected extra arguments: {:?}",
+                self.args()
+            ));
+        }
+        Ok(())
+    }
+
+    /// Removes the last argument unconditionally.
+    pub(crate) fn shift_last_arg(&mut self) -> Result<String, String> {
+        if self.start >= self.end {
+            return Err("Missing argument".to_string());
+        }
+        let arg = self
+            .input
+            .args
+            .get(self.end - 1)
+            .ok_or_else(|| "Missing argument")?;
+        self.end -= 1;
+        Ok(arg.to_string())
+    }
+
+    /// Removes the next argument unconditionally.
+    pub(crate) fn shift_arg(&mut self) -> Result<String, String> {
+        if self.start >= self.end {
+            return Err("Missing argument".to_string());
+        }
+        let arg = self
+            .input
+            .args
+            .get(self.start)
+            .ok_or_else(|| "Missing argument")?;
+        self.start += 1;
+        Ok(arg.to_string())
+    }
+
+    /// Removes the next argument, which must equal the provided value.
+    pub(crate) fn shift_arg_expect(
+        &mut self,
+        value: &str,
+    ) -> Result<(), String> {
+        let v = self.shift_arg()?;
+        if value != v {
+            return Err(format!("Unexpected argument {v} (expected: {value})"));
+        }
+        Ok(())
+    }
+
+    /// Removes the next argument if it equals `value`.
+    ///
+    /// Returns whether it was equal.
+    pub(crate) fn shift_arg_if(&mut self, value: &str) -> Result<bool, String> {
+        let eq = self
+            .input
+            .args
+            .get(self.start)
+            .ok_or_else(|| "Missing argument")?
+            == value;
+        if eq {
+            self.shift_arg()?;
+        }
+        Ok(eq)
+    }
+}
diff --git a/helios/tokamak/src/cli/route.rs b/helios/tokamak/src/cli/route.rs
new file mode 100644
index 00000000000..569a67e0173
--- /dev/null
+++ b/helios/tokamak/src/cli/route.rs
@@ -0,0 +1,135 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
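Each of these modules, including the route parser below, leans on the shared `InputParser` cursor from `cli/parse.rs`. A rough illustration of its API under the semantics above (`shift_arg_if` consumes a flag only when it matches, `shift_last_arg` peels the trailing operand, `no_args_remaining` rejects leftovers); the command name is hypothetical:

// Sketch only: walking a hypothetical "mycmd -t -p key=value target" input.
fn walk_input() -> Result<(), String> {
    let mut p = InputParser::new(Input::shell("mycmd -t -p key=value target"));
    assert_eq!(p.shift_arg()?, "-t");       // unconditional shift
    let target = p.shift_last_arg()?;       // takes "target" from the end
    assert!(p.shift_arg_if("-p")?);         // matches, so it is consumed
    assert_eq!(p.shift_arg()?, "key=value");
    p.no_args_remaining()?;                 // nothing left between the cursors
    assert_eq!(target, "target");
    Ok(())
}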
+ +use crate::cli::parse::InputParser; +use crate::host::LinkName; + +use helios_fusion::Input; +use helios_fusion::ROUTE; +use ipnetwork::IpNetwork; +use std::str::FromStr; + +#[derive(Debug, PartialEq, Eq)] +pub enum RouteTarget { + Default, + DefaultV4, + DefaultV6, + ByAddress(IpNetwork), +} + +impl RouteTarget { + fn shift_target(input: &mut InputParser) -> Result { + let force_v4 = input.shift_arg_if("-inet")?; + let force_v6 = input.shift_arg_if("-inet6")?; + + let target = match (force_v4, force_v6, input.shift_arg()?.as_str()) { + (true, true, _) => { + return Err("Cannot force both v4 and v6".to_string()) + } + (true, false, "default") => RouteTarget::DefaultV4, + (false, true, "default") => RouteTarget::DefaultV6, + (false, false, "default") => RouteTarget::Default, + (_, _, other) => { + let net = + IpNetwork::from_str(other).map_err(|e| e.to_string())?; + if force_v4 && !net.is_ipv4() { + return Err(format!("{net} is not ipv4")); + } + if force_v6 && !net.is_ipv6() { + return Err(format!("{net} is not ipv6")); + } + RouteTarget::ByAddress(net) + } + }; + Ok(target) + } +} + +pub(crate) enum Command { + Add { + destination: RouteTarget, + gateway: RouteTarget, + interface: Option, + }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != ROUTE { + return Err(format!("Not route command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + match input.shift_arg()?.as_str() { + "add" => { + let destination = RouteTarget::shift_target(&mut input)?; + let gateway = RouteTarget::shift_target(&mut input)?; + + let interface = if let Ok(true) = input.shift_arg_if("-ifp") { + Some(LinkName(input.shift_arg()?)) + } else { + None + }; + input.no_args_remaining()?; + Ok(Command::Add { destination, gateway, interface }) + } + command => return Err(format!("Unsupported command: {}", command)), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use ipnetwork::IpNetwork; + use std::str::FromStr; + + #[test] + fn add() { + // Valid command + let Command::Add { destination, gateway, interface } = + Command::try_from(Input::shell(format!( + "{ROUTE} add -inet6 fd00::/16 default -ifp mylink" + ))) + .unwrap(); + assert_eq!( + destination, + RouteTarget::ByAddress(IpNetwork::from_str("fd00::/16").unwrap()) + ); + assert_eq!(gateway, RouteTarget::Default); + assert_eq!(interface.unwrap().0, "mylink"); + + // Valid command + let Command::Add { destination, gateway, interface } = + Command::try_from(Input::shell(format!( + "{ROUTE} add -inet default 127.0.0.1/8" + ))) + .unwrap(); + assert_eq!(destination, RouteTarget::DefaultV4); + assert_eq!( + gateway, + RouteTarget::ByAddress(IpNetwork::from_str("127.0.0.1/8").unwrap()) + ); + assert!(interface.is_none()); + + // Invalid address family + assert!(Command::try_from(Input::shell(format!( + "{ROUTE} add -inet -inet6 default 127.0.0.1/8" + ))) + .err() + .unwrap() + .contains("Cannot force both v4 and v6")); + + // Invalid address family + assert!(Command::try_from(Input::shell(format!( + "{ROUTE} add -inet6 default -inet6 127.0.0.1/8" + ))) + .err() + .unwrap() + .contains("127.0.0.1/8 is not ipv6")); + } +} diff --git a/helios/tokamak/src/cli/svcadm.rs b/helios/tokamak/src/cli/svcadm.rs new file mode 100644 index 00000000000..eccea13a65f --- /dev/null +++ b/helios/tokamak/src/cli/svcadm.rs @@ -0,0 +1,93 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::cli::parse::InputParser; +use crate::host::{ServiceName, ZoneName}; + +use helios_fusion::Input; +use helios_fusion::SVCADM; + +pub enum Command { + Enable { zone: Option, service: ServiceName }, + Disable { zone: Option, service: ServiceName }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != SVCADM { + return Err(format!("Not svcadm command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + let zone = if input.shift_arg_if("-z")? { + Some(ZoneName(input.shift_arg()?)) + } else { + None + }; + + match input.shift_arg()?.as_str() { + "enable" => { + // Intentionally ignored + input.shift_arg_if("-t")?; + let service = ServiceName(input.shift_arg()?); + input.no_args_remaining()?; + Ok(Command::Enable { zone, service }) + } + "disable" => { + // Intentionally ignored + input.shift_arg_if("-t")?; + let service = ServiceName(input.shift_arg()?); + input.no_args_remaining()?; + Ok(Command::Disable { zone, service }) + } + command => return Err(format!("Unexpected command: {command}")), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn enable() { + let Command::Enable { zone, service } = Command::try_from( + Input::shell(format!( + "{SVCADM} -z myzone enable -t foobar" + )), + ).unwrap() else { + panic!("wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(service.0, "foobar"); + + assert!(Command::try_from(Input::shell(format!("{SVCADM} enable"))) + .err() + .unwrap() + .contains("Missing argument")); + } + + #[test] + fn disable() { + let Command::Disable { zone, service } = Command::try_from( + Input::shell(format!( + "{SVCADM} -z myzone disable -t foobar" + )), + ).unwrap() else { + panic!("wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(service.0, "foobar"); + + assert!(Command::try_from(Input::shell(format!("{SVCADM} disable"))) + .err() + .unwrap() + .contains("Missing argument")); + } +} diff --git a/helios/tokamak/src/cli/svccfg.rs b/helios/tokamak/src/cli/svccfg.rs new file mode 100644 index 00000000000..5520702e3da --- /dev/null +++ b/helios/tokamak/src/cli/svccfg.rs @@ -0,0 +1,367 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
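The svccfg parser below scopes every subcommand with optional `-z ZONE` and `-s FMRI` selectors; its `setprop` handler accepts both the `name=value` and the space-separated `name = value` spellings (the `name = type: value` form is noted as unsupported). A hedged sketch of that equivalence, assuming this module's imports and an illustrative FMRI:

// Sketch only: both setprop spellings reduce to the same typed command.
fn setprop_forms() -> Result<(), String> {
    for line in [
        format!("{SVCCFG} -s svc:/my/svc:default setprop config/foo=bar"),
        format!("{SVCCFG} -s svc:/my/svc:default setprop config/foo = bar"),
    ] {
        let Command::Setprop { name, value, .. } =
            Command::try_from(Input::shell(line))?
        else {
            return Err("unexpected svccfg subcommand".to_string());
        };
        assert_eq!(name.to_string(), "config/foo");
        assert_eq!(value, "bar");
    }
    Ok(())
}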
+ +use crate::cli::parse::InputParser; +use crate::host::{ServiceName, ZoneName}; + +use camino::Utf8PathBuf; +use helios_fusion::Input; +use helios_fusion::SVCCFG; +use std::str::FromStr; + +pub(crate) enum Command { + Addpropvalue { + zone: Option, + fmri: ServiceName, + key: smf::PropertyName, + ty: Option, + value: String, + }, + Addpg { + zone: Option, + fmri: ServiceName, + group: smf::PropertyGroupName, + group_type: String, + }, + Delpg { + zone: Option, + fmri: ServiceName, + group: smf::PropertyGroupName, + }, + Delpropvalue { + zone: Option, + fmri: ServiceName, + name: smf::PropertyName, + glob: String, + }, + Import { + zone: Option, + file: Utf8PathBuf, + }, + Refresh { + zone: Option, + fmri: ServiceName, + }, + Setprop { + zone: Option, + fmri: ServiceName, + name: smf::PropertyName, + value: String, + }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != SVCCFG { + return Err(format!("Not svccfg command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + let zone = if input.shift_arg_if("-z")? { + Some(ZoneName(input.shift_arg()?)) + } else { + None + }; + + let fmri = if input.shift_arg_if("-s")? { + Some(ServiceName(input.shift_arg()?)) + } else { + None + }; + + match input.shift_arg()?.as_str() { + "addpropvalue" => { + let name = input.shift_arg()?; + let name = smf::PropertyName::from_str(&name) + .map_err(|e| e.to_string())?; + + let type_or_value = input.shift_arg()?; + let (ty, value) = match input.shift_arg().ok() { + Some(value) => { + let ty = type_or_value + .strip_suffix(':') + .ok_or_else(|| { + format!("Bad property type: {type_or_value}") + })? + .to_string(); + (Some(ty), value) + } + None => (None, type_or_value), + }; + + let fmri = + fmri.ok_or_else(|| "-s option required for addpropvalue")?; + + input.no_args_remaining()?; + Ok(Command::Addpropvalue { zone, fmri, key: name, ty, value }) + } + "addpg" => { + let name = input.shift_arg()?; + let group = smf::PropertyGroupName::new(&name) + .map_err(|e| e.to_string())?; + + let group_type = input.shift_arg()?; + if let Some(_flags) = input.shift_arg().ok() { + return Err( + "Parsing of optional flags not implemented".to_string() + ); + } + let fmri = + fmri.ok_or_else(|| "-s option required for addpg")?; + + input.no_args_remaining()?; + Ok(Command::Addpg { zone, fmri, group, group_type }) + } + "delpg" => { + let name = input.shift_arg()?; + let group = smf::PropertyGroupName::new(&name) + .map_err(|e| e.to_string())?; + let fmri = + fmri.ok_or_else(|| "-s option required for delpg")?; + + input.no_args_remaining()?; + Ok(Command::Delpg { zone, fmri, group }) + } + "delpropvalue" => { + let name = input.shift_arg()?; + let name = smf::PropertyName::from_str(&name) + .map_err(|e| e.to_string())?; + let fmri = + fmri.ok_or_else(|| "-s option required for delpropvalue")?; + let glob = input.shift_arg()?; + + input.no_args_remaining()?; + Ok(Command::Delpropvalue { zone, fmri, name, glob }) + } + "import" => { + let file = input.shift_arg()?; + if let Some(_) = fmri { + return Err( + "Cannot use '-s' option with import".to_string() + ); + } + input.no_args_remaining()?; + Ok(Command::Import { zone, file: file.into() }) + } + "refresh" => { + let fmri = + fmri.ok_or_else(|| "-s option required for refresh")?; + input.no_args_remaining()?; + Ok(Command::Refresh { zone, fmri }) + } + "setprop" => { + let fmri = + fmri.ok_or_else(|| "-s option required for setprop")?; + + // Setprop seems fine accepting args of the 
form: + // - name=value + // - name = value + // - name = type: value (NOTE: not yet supported) + let first_arg = input.shift_arg()?; + let (name, value) = + if let Some((name, value)) = first_arg.split_once('=') { + (name.to_string(), value.to_string()) + } else { + let name = first_arg; + input.shift_arg_expect("=")?; + let value = input.shift_arg()?; + (name, value) + }; + + let name = smf::PropertyName::from_str(&name) + .map_err(|e| e.to_string())?; + + input.no_args_remaining()?; + Ok(Command::Setprop { zone, fmri, name, value }) + } + command => return Err(format!("Unexpected command: {command}")), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn addpropvalue() { + let Command::Addpropvalue { zone, fmri, key, ty, value } = Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s svc:/myservice:default addpropvalue foo/bar astring: baz" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(fmri.0, "svc:/myservice:default"); + assert_eq!(key.to_string(), "foo/bar"); + assert_eq!(ty, Some("astring".to_string())); + assert_eq!(value, "baz"); + + assert!(Command::try_from(Input::shell(format!( + "{SVCCFG} addpropvalue foo/bar baz" + ))) + .err() + .unwrap() + .contains("-s option required")); + + assert!(Command::try_from(Input::shell(format!( + "{SVCCFG} -s svc:/mysvc addpropvalue foo/bar astring baz" + ))) + .err() + .unwrap() + .contains("Bad property type")); + } + + #[test] + fn addpg() { + let Command::Addpg { zone, fmri, group, group_type } = Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s svc:/myservice:default addpg foo baz" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(fmri.0, "svc:/myservice:default"); + assert_eq!(group.to_string(), "foo"); + assert_eq!(group_type, "baz"); + + assert!(Command::try_from(Input::shell(format!( + "{SVCCFG} addpg foo baz" + ))) + .err() + .unwrap() + .contains("-s option required")); + + assert!(Command::try_from(Input::shell(format!( + "{SVCCFG} addpg foo baz P" + ))) + .err() + .unwrap() + .contains("Parsing of optional flags not implemented")); + } + + #[test] + fn delpg() { + let Command::Delpg { zone, fmri, group } = Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s svc:/myservice:default delpg foo" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(fmri.0, "svc:/myservice:default"); + assert_eq!(group.to_string(), "foo"); + + assert!(Command::try_from(Input::shell(format!("{SVCCFG} delpg foo"))) + .err() + .unwrap() + .contains("-s option required")); + + assert!(Command::try_from(Input::shell(format!( + "{SVCCFG} -s mysvc delpg foo baz" + ))) + .err() + .unwrap() + .contains("Unexpected extra arguments")); + } + + #[test] + fn import() { + let Command::Import { zone, file } = Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone import myfile" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(file, "myfile"); + + assert!(Command::try_from(Input::shell(format!( + "{SVCCFG} import myfile myotherfile" + ))) + .err() + .unwrap() + .contains("Unexpected extra arguments")); + + assert!(Command::try_from(Input::shell(format!( + "{SVCCFG} -s myservice import myfile" + ))) + .err() + .unwrap() + .contains("Cannot use '-s' option with import")); + } + + #[test] + fn refresh() { + let Command::Refresh { zone, fmri } = 
Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s myservice refresh" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(fmri.0, "myservice"); + } + + #[test] + fn setprop() { + let Command::Setprop { zone, fmri, name, value } = Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s myservice setprop foo/bar=baz" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(fmri.0, "myservice"); + assert_eq!(name.to_string(), "foo/bar"); + assert_eq!(value, "baz"); + + // Try that command again, but with spaces + let Command::Setprop { zone, fmri, name, value } = Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s myservice setprop foo/bar = baz" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(fmri.0, "myservice"); + assert_eq!(name.to_string(), "foo/bar"); + assert_eq!(value, "baz"); + + // Try that command again, but with quotes + let Command::Setprop { zone, fmri, name, value } = Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s myservice setprop foo/bar = \"fizz buzz\"" + )) + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(zone.unwrap().0, "myzone"); + assert_eq!(fmri.0, "myservice"); + assert_eq!(name.to_string(), "foo/bar"); + assert_eq!(value, "fizz buzz"); + + assert!(Command::try_from( + Input::shell(format!( + "{SVCCFG} -z myzone -s myservice setprop foo/bar = \"fizz buzz\" blat" + )) + ).err().unwrap().contains("Unexpected extra arguments")); + } +} diff --git a/helios/tokamak/src/cli/zfs.rs b/helios/tokamak/src/cli/zfs.rs new file mode 100644 index 00000000000..6de77294c6c --- /dev/null +++ b/helios/tokamak/src/cli/zfs.rs @@ -0,0 +1,527 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::cli::parse::InputParser; +use crate::types::dataset; + +use helios_fusion::Input; +use helios_fusion::ZFS; +use std::collections::HashMap; + +pub(crate) enum Command { + CreateFilesystem { + properties: HashMap, + name: dataset::Name, + }, + CreateVolume { + properties: HashMap, + sparse: bool, + blocksize: Option, + size: u64, + name: dataset::Name, + }, + Destroy { + recursive_dependents: bool, + recursive_children: bool, + force_unmount: bool, + name: dataset::Name, + }, + Get { + recursive: bool, + depth: Option, + // name, property, value, source + fields: Vec, + properties: Vec, + datasets: Option>, + }, + List { + recursive: bool, + depth: Option, + properties: Vec, + datasets: Option>, + }, + Mount { + load_keys: bool, + filesystem: dataset::Name, + }, + Set { + properties: HashMap, + name: dataset::Name, + }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != ZFS { + return Err(format!("Not zfs command: {}", input.program)); + } + + let mut input = InputParser::new(input); + match input.shift_arg()?.as_str() { + "create" => { + let mut size = None; + let mut blocksize = None; + let mut sparse = None; + let mut properties = HashMap::new(); + + while input.args().len() > 1 { + // Volume Size (volumes only, required) + if input.shift_arg_if("-V")? 
{ + let size_str = input.shift_arg()?; + + let (size_str, multiplier) = if let Some(size_str) = + size_str.strip_suffix('G') + { + (size_str, (1 << 30)) + } else if let Some(size_str) = + size_str.strip_suffix('M') + { + (size_str, (1 << 20)) + } else if let Some(size_str) = + size_str.strip_suffix('K') + { + (size_str, (1 << 10)) + } else { + (size_str.as_str(), 1) + }; + + size = Some( + size_str + .parse::() + .map_err(|e| e.to_string())? + * multiplier, + ); + // Sparse (volumes only, optional) + } else if input.shift_arg_if("-s")? { + sparse = Some(true); + // Block size (volumes only, optional) + } else if input.shift_arg_if("-b")? { + blocksize = Some( + input + .shift_arg()? + .parse::() + .map_err(|e| e.to_string())?, + ); + // Properties + } else if input.shift_arg_if("-o")? { + let prop = input.shift_arg()?; + let (k, v) = prop + .split_once('=') + .ok_or_else(|| format!("Bad property: {prop}"))?; + let prop = dataset::Property::try_from(k) + .map_err(|e| format!("Unknown property: {e}"))?; + + properties.insert(prop, v.to_string()); + } else { + let arg = input.shift_arg()?; + return Err(format!("Unexpected argument: {arg}")); + } + } + let name = input.shift_arg()?; + input.no_args_remaining()?; + + if let Some(size) = size { + // Volume + let sparse = sparse.unwrap_or(false); + let name = dataset::Name::new(name)?; + Ok(Command::CreateVolume { + properties, + sparse, + blocksize, + size, + name, + }) + } else { + // Filesystem + if sparse.is_some() || blocksize.is_some() { + return Err("Using volume arguments, but forgot to specify '-V size'?".to_string()); + } + let name = dataset::Name::new(name)?; + Ok(Command::CreateFilesystem { properties, name }) + } + } + "destroy" => { + let mut recursive_dependents = false; + let mut recursive_children = false; + let mut force_unmount = false; + let mut name = None; + + while !input.args().is_empty() { + let arg = input.shift_arg()?; + let mut chars = arg.chars(); + if let Some('-') = chars.next() { + while let Some(c) = chars.next() { + match c { + 'R' => recursive_dependents = true, + 'r' => recursive_children = true, + 'f' => force_unmount = true, + c => { + return Err(format!( + "Unrecognized option '-{c}'" + )) + } + } + } + } else { + name = Some(dataset::Name::new(arg)?); + input.no_args_remaining()?; + } + } + let name = name.ok_or_else(|| "Missing name".to_string())?; + Ok(Command::Destroy { + recursive_dependents, + recursive_children, + force_unmount, + name, + }) + } + "get" => { + let mut scripting = false; + let mut parsable = false; + let mut recursive = false; + let mut depth = None; + let mut fields = ["name", "property", "value", "source"] + .map(String::from) + .to_vec(); + let mut properties = vec![]; + + while !input.args().is_empty() { + let arg = input.shift_arg()?; + let mut chars = arg.chars(); + // ZFS list lets callers pass in flags in groups, or + // separately. + if let Some('-') = chars.next() { + while let Some(c) = chars.next() { + match c { + 'r' => recursive = true, + 'H' => scripting = true, + 'p' => parsable = true, + 'd' => { + let depth_raw = + if chars.clone().next().is_some() { + chars.collect::() + } else { + input.shift_arg()? + }; + depth = Some( + depth_raw + .parse::() + .map_err(|e| e.to_string())?, + ); + // Convince the compiler we won't use any + // more 'chars', because used them all + // parsing 'depth'. + break; + } + 'o' => { + if chars.next().is_some() { + return Err("-o should be immediately followed by fields".to_string()); + } + fields = input + .shift_arg()? 
+ .split(',') + .map(|s| s.to_string()) + .collect(); + } + c => { + return Err(format!( + "Unrecognized option '-{c}'" + )) + } + } + } + } else { + properties = arg + .split(',') + .map(|s| { + dataset::Property::try_from(s).map_err(|err| { + format!("unknown property: {err}") + }) + }) + .collect::, String>>( + )?; + break; + } + } + + let datasets = Some( + input + .args() + .into_iter() + .map(|s| dataset::Name::new(s.to_string())) + .collect::, _>>()?, + ); + if !scripting || !parsable { + return Err("You should run 'zfs get' commands with the '-Hp' flags enabled".to_string()); + } + + Ok(Command::Get { + recursive, + depth, + fields, + properties, + datasets, + }) + } + "list" => { + let mut scripting = false; + let mut parsable = false; + let mut recursive = false; + let mut depth = None; + let mut properties = vec![]; + let mut datasets = None; + + while !input.args().is_empty() { + let arg = input.shift_arg()?; + let mut chars = arg.chars(); + // ZFS list lets callers pass in flags in groups, or + // separately. + if let Some('-') = chars.next() { + while let Some(c) = chars.next() { + match c { + 'r' => recursive = true, + 'H' => scripting = true, + 'p' => parsable = true, + 'd' => { + let depth_raw = + if chars.clone().next().is_some() { + chars.collect::() + } else { + input.shift_arg()? + }; + depth = Some( + depth_raw + .parse::() + .map_err(|e| e.to_string())?, + ); + // Convince the compiler we won't use any + // more 'chars', because used them all + // parsing 'depth'. + break; + } + 'o' => { + if chars.next().is_some() { + return Err("-o should be immediately followed by properties".to_string()); + } + properties = input + .shift_arg()? + .split(',') + .map(|s| { + dataset::Property::try_from(s) + .map_err(|err| format!("unknown property: {err}")) + }) + .collect::, String>>()?; + } + c => { + return Err(format!( + "Unrecognized option '-{c}'" + )) + } + } + } + } else { + // As soon as non-flag arguments are passed, the rest of + // the arguments are treated as datasets. 
+ datasets = + Some(vec![dataset::Name::new(arg.to_string())?]); + break; + } + } + + let remaining_datasets = input.args(); + if !remaining_datasets.is_empty() { + datasets.get_or_insert(vec![]).extend( + remaining_datasets + .into_iter() + .map(|d| dataset::Name::new(d.to_string())) + .collect::, _>>()?, + ); + }; + + if !scripting || !parsable { + return Err("You should run 'zfs list' commands with the '-Hp' flags enabled".to_string()); + } + + if properties.is_empty() { + properties = vec![dataset::Property::Name]; + } + + Ok(Command::List { recursive, depth, properties, datasets }) + } + "mount" => { + let load_keys = input.shift_arg_if("-l")?; + let filesystem = dataset::Name::new(input.shift_arg()?)?; + input.no_args_remaining()?; + Ok(Command::Mount { load_keys, filesystem }) + } + "set" => { + let mut properties = HashMap::new(); + + while input.args().len() > 1 { + let prop = input.shift_arg()?; + let (k, v) = prop + .split_once('=') + .ok_or_else(|| format!("Bad property: {prop}"))?; + let prop = dataset::Property::try_from(k) + .map_err(|e| format!("Unknown property: {e}"))?; + properties.insert(prop, v.to_string()); + } + let name = dataset::Name::new(input.shift_arg()?)?; + input.no_args_remaining()?; + + Ok(Command::Set { properties, name }) + } + command => return Err(format!("Unexpected command: {command}")), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn create() { + // Create a filesystem + let Command::CreateFilesystem { properties, name } = Command::try_from( + Input::shell(format!("{ZFS} create myfilesystem")) + ).unwrap() else { panic!("wrong command") }; + assert_eq!(properties, HashMap::new()); + assert_eq!(name.as_str(), "myfilesystem"); + + // Create a volume + let Command::CreateVolume { properties, sparse, blocksize, size, name } = Command::try_from( + Input::shell(format!("{ZFS} create -s -V 1024 -b 512 -o logbias=bar myvolume")) + ).unwrap() else { panic!("wrong command") }; + assert_eq!( + properties, + HashMap::from([(dataset::Property::Logbias, "bar".to_string())]) + ); + assert_eq!(name.as_str(), "myvolume"); + assert!(sparse); + assert_eq!(size, 1024); + assert_eq!(blocksize, Some(512)); + + // Create a volume (using letter suffix) + let Command::CreateVolume { properties, sparse, blocksize, size, name } = Command::try_from( + Input::shell(format!("{ZFS} create -s -V 2G -b 512 -o logbias=bar myvolume")) + ).unwrap() else { panic!("wrong command") }; + assert_eq!( + properties, + HashMap::from([(dataset::Property::Logbias, "bar".to_string())]) + ); + assert_eq!(name.as_str(), "myvolume"); + assert!(sparse); + assert_eq!(size, 2 << 30); + assert_eq!(blocksize, Some(512)); + + // Create volume (invalid) + assert!(Command::try_from(Input::shell(format!( + "{ZFS} create -s -b 512 -o logbias=bar myvolume" + ))) + .err() + .unwrap() + .contains("Using volume arguments, but forgot to specify '-V size'")); + } + + #[test] + fn destroy() { + let Command::Destroy { recursive_dependents, recursive_children, force_unmount, name } = + Command::try_from( + Input::shell(format!("{ZFS} destroy -rf foobar")) + ).unwrap() else { panic!("wrong command") }; + + assert!(!recursive_dependents); + assert!(recursive_children); + assert!(force_unmount); + assert_eq!(name.as_str(), "foobar"); + + assert!(Command::try_from(Input::shell(format!( + "{ZFS} destroy -x doit" + ))) + .err() + .unwrap() + .contains("Unrecognized option '-x'")); + } + + #[test] + fn get() { + let Command::Get { recursive, depth, fields, properties, datasets } = Command::try_from( + 
Input::shell(format!("{ZFS} get -Hrpd10 -o name,value mounted,available myvolume")) + ).unwrap() else { panic!("wrong command") }; + + assert!(recursive); + assert_eq!(depth, Some(10)); + assert_eq!(fields, vec!["name", "value"]); + assert_eq!( + properties, + vec![dataset::Property::Mounted, dataset::Property::Available] + ); + assert_eq!( + datasets.unwrap(), + vec![dataset::Name::new("myvolume".to_string()).unwrap()] + ); + + assert!(Command::try_from(Input::shell(format!( + "{ZFS} get -o name,value mounted,available myvolume" + ))) + .err() + .unwrap() + .contains( + "You should run 'zfs get' commands with the '-Hp' flags enabled" + )); + } + + #[test] + fn list() { + let Command::List { recursive, depth, properties, datasets } = Command::try_from( + Input::shell(format!("{ZFS} list -d 1 -rHpo name myfilesystem")) + ).unwrap() else { panic!("wrong command") }; + + assert!(recursive); + assert_eq!(depth.unwrap(), 1); + assert_eq!(properties, vec![dataset::Property::Name]); + assert_eq!( + datasets.unwrap(), + vec![dataset::Name::new("myfilesystem".to_string()).unwrap()] + ); + + assert!(Command::try_from(Input::shell(format!( + "{ZFS} list name myfilesystem" + ))) + .err() + .unwrap() + .contains( + "You should run 'zfs list' commands with the '-Hp' flags enabled" + )); + } + + #[test] + fn mount() { + let Command::Mount { load_keys, filesystem } = Command::try_from( + Input::shell(format!("{ZFS} mount -l foobar")) + ).unwrap() else { panic!("wrong command") }; + + assert!(load_keys); + assert_eq!(filesystem.as_str(), "foobar"); + } + + #[test] + fn set() { + let Command::Set { properties, name } = Command::try_from( + Input::shell(format!("{ZFS} set mountpoint=bar logbias=blat myfs")) + ).unwrap() else { panic!("wrong command") }; + + assert_eq!( + properties, + HashMap::from([ + (dataset::Property::Mountpoint, "bar".to_string()), + (dataset::Property::Logbias, "blat".to_string()) + ]) + ); + assert_eq!(name.as_str(), "myfs"); + } +} diff --git a/helios/tokamak/src/cli/zoneadm.rs b/helios/tokamak/src/cli/zoneadm.rs new file mode 100644 index 00000000000..39f7af0e5e7 --- /dev/null +++ b/helios/tokamak/src/cli/zoneadm.rs @@ -0,0 +1,120 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::cli::parse::InputParser; +use crate::host::ZoneName; + +use helios_fusion::Input; +use helios_fusion::ZONEADM; + +pub(crate) enum Command { + Boot { + name: ZoneName, + }, + Halt { + name: ZoneName, + }, + Install { + name: ZoneName, + brand_specific_args: Vec, + }, + List { + // Overrides the "list_installed" option + list_configured: bool, + list_installed: bool, + }, + Uninstall { + name: ZoneName, + force: bool, + }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != ZONEADM { + return Err(format!("Not zoneadm command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + let name = if input.shift_arg_if("-z")? 
{ + Some(ZoneName(input.shift_arg()?)) + } else { + None + }; + + match input.shift_arg()?.as_str() { + "boot" => { + input.no_args_remaining()?; + let name = name.ok_or_else(|| { + "No zone specified, try: zoneadm -z ZONE boot" + })?; + Ok(Command::Boot { name }) + } + "halt" => { + input.no_args_remaining()?; + let name = name.ok_or_else(|| { + "No zone specified, try: zoneadm -z ZONE halt" + })?; + Ok(Command::Halt { name }) + } + "install" => { + let brand_specific_args = + input.args().into_iter().cloned().collect(); + let name = name.ok_or_else(|| { + "No zone specified, try: zoneadm -z ZONE install" + })?; + Ok(Command::Install { name, brand_specific_args }) + } + "list" => { + let mut list_configured = false; + let mut list_installed = false; + let mut parsable = false; + + while !input.args().is_empty() { + let arg = input.shift_arg()?; + let mut chars = arg.chars(); + + if let Some('-') = chars.next() { + while let Some(c) = chars.next() { + match c { + 'c' => list_configured = true, + 'i' => list_installed = true, + 'p' => parsable = true, + c => { + return Err(format!( + "Unrecognized option '-{c}'" + )) + } + } + } + } else { + return Err(format!("Unrecognized argument {arg}")); + } + } + + if !parsable { + return Err("You should run 'zoneadm list' commands with the '-p' flag enabled".to_string()); + } + + Ok(Command::List { list_configured, list_installed }) + } + "uninstall" => { + let name = name.ok_or_else(|| { + "No zone specified, try: zoneadm -z ZONE uninstall" + })?; + let force = if !input.args().is_empty() { + input.shift_arg_if("-F")? + } else { + false + }; + input.no_args_remaining()?; + Ok(Command::Uninstall { name, force }) + } + command => return Err(format!("Unexpected command: {command}")), + } + } +} diff --git a/helios/tokamak/src/cli/zonecfg.rs b/helios/tokamak/src/cli/zonecfg.rs new file mode 100644 index 00000000000..2e86342fdd7 --- /dev/null +++ b/helios/tokamak/src/cli/zonecfg.rs @@ -0,0 +1,286 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
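The zonecfg parser below treats its configuration as a small `;`-separated grammar: global `set`s, plus `add <resource> ... end` scopes that must be closed before the config is accepted. A sketch of that rule, assuming this module's imports and the `ZONECFG` constant, with an illustrative zone name:

// Sketch only: an unterminated `add net` scope is rejected; a closed one is not.
fn zonecfg_scopes() {
    let closed = Command::try_from(Input::shell(format!(
        "{ZONECFG} -z z1 create -F -b ; set brand=omicron1 ; \
         set zonepath=/zone/z1 ; add net ; set physical=net0 ; end"
    )));
    assert!(closed.is_ok());

    let open = Command::try_from(Input::shell(format!(
        "{ZONECFG} -z z1 create -F -b ; set brand=omicron1 ; \
         set zonepath=/zone/z1 ; add net ; set physical=net0"
    )));
    assert!(open.is_err());
}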
+ +use crate::cli::parse::InputParser; +use crate::host::{ZoneConfig, ZoneName}; + +use camino::Utf8PathBuf; +use helios_fusion::Input; +use helios_fusion::ZONECFG; + +pub(crate) enum Command { + Create { name: ZoneName, config: ZoneConfig }, + Delete { name: ZoneName }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != ZONECFG { + return Err(format!("Not zonecfg command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + input.shift_arg_expect("-z")?; + let zone = ZoneName(input.shift_arg()?); + match input.shift_arg()?.as_str() { + "create" => { + input.shift_arg_expect("-F")?; + input.shift_arg_expect("-b")?; + + enum Scope { + Global, + Dataset(zone::Dataset), + Device(zone::Device), + Fs(zone::Fs), + Net(zone::Net), + } + let mut scope = Scope::Global; + + // Globally-scoped Resources + let mut brand = None; + let mut path = None; + + // Non-Global Resources + let mut datasets = vec![]; + let mut devices = vec![]; + let mut nets = vec![]; + let mut fs = vec![]; + + while !input.args().is_empty() { + input.shift_arg_expect(";")?; + match input.shift_arg()?.as_str() { + "set" => { + let prop = input.shift_arg()?; + let (k, v) = + prop.split_once('=').ok_or_else(|| { + format!("Bad property: {prop}") + })?; + + match &mut scope { + Scope::Global => { + match k { + "brand" => { + brand = Some(v.to_string()); + } + "zonepath" => { + path = Some(Utf8PathBuf::from(v)); + } + "autoboot" => { + if v != "false" { + return Err(format!("Unhandled autoboot value: {v}")); + } + } + "ip-type" => { + if v != "exclusive" { + return Err(format!("Unhandled ip-type value: {v}")); + } + } + k => { + return Err(format!( + "Unknown property name: {k}" + )) + } + } + } + Scope::Dataset(d) => match k { + "name" => d.name = v.to_string(), + k => { + return Err(format!( + "Unknown property name: {k}" + )) + } + }, + Scope::Device(d) => match k { + "match" => d.name = v.to_string(), + k => { + return Err(format!( + "Unknown property name: {k}" + )) + } + }, + Scope::Fs(f) => match k { + "type" => f.ty = v.to_string(), + "dir" => f.dir = v.to_string(), + "special" => f.special = v.to_string(), + "raw" => f.raw = Some(v.to_string()), + "options" => { + f.options = v + .split(',') + .map(|s| s.to_string()) + .collect() + } + k => { + return Err(format!( + "Unknown property name: {k}" + )) + } + }, + Scope::Net(n) => match k { + "physical" => n.physical = v.to_string(), + "address" => { + n.address = Some(v.to_string()) + } + "allowed-address" => { + n.allowed_address = Some(v.to_string()) + } + k => { + return Err(format!( + "Unknown property name: {k}" + )) + } + }, + } + } + "add" => { + if !matches!(scope, Scope::Global) { + return Err("Cannot add from non-global scope" + .to_string()); + } + match input.shift_arg()?.as_str() { + "dataset" => { + scope = + Scope::Dataset(zone::Dataset::default()) + } + "device" => { + scope = + Scope::Device(zone::Device::default()) + } + "fs" => scope = Scope::Fs(zone::Fs::default()), + "net" => { + scope = Scope::Net(zone::Net::default()) + } + scope => { + return Err(format!( + "Unexpected scope: {scope}" + )) + } + } + } + "end" => { + match scope { + Scope::Global => { + return Err( + "Cannot end global scope".to_string() + ) + } + Scope::Dataset(d) => datasets.push(d), + Scope::Device(d) => devices.push(d), + Scope::Fs(f) => fs.push(f), + Scope::Net(n) => nets.push(n), + } + scope = Scope::Global; + } + sc => { + return Err(format!("Unexpected subcommand: {sc}")) + } + } + } + + 
if !matches!(scope, Scope::Global) { + return Err( + "Cannot end zonecfg outside global scope".to_string() + ); + } + + Ok(Command::Create { + name: zone, + config: ZoneConfig { + state: zone::State::Configured, + brand: brand.ok_or_else(|| "Missing brand")?, + path: path.ok_or_else(|| "Missing zonepath")?, + datasets, + devices, + nets, + fs, + layers: vec![], + }, + }) + } + "delete" => { + input.shift_arg_expect("-F")?; + Ok(Command::Delete { name: zone }) + } + command => return Err(format!("Unexpected command: {command}")), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn create() { + let Command::Create { name, config } = Command::try_from( + Input::shell(format!( + "{ZONECFG} -z myzone \ + create -F -b ; \ + set brand=omicron1 ; \ + set zonepath=/zone/myzone ; \ + set autoboot=false ; \ + set ip-type=exclusive ; \ + add net ; \ + set physical=oxControlService0 ; \ + end" + )), + ).unwrap() else { + panic!("Wrong command"); + }; + + assert_eq!(name.0, "myzone"); + assert_eq!(config.state, zone::State::Configured); + assert_eq!(config.brand, "omicron1"); + assert_eq!(config.path, Utf8PathBuf::from("/zone/myzone")); + assert!(config.datasets.is_empty()); + assert_eq!(config.nets[0].physical, "oxControlService0"); + assert!(config.fs.is_empty()); + assert!(config.layers.is_empty()); + + // Missing brand + assert!(Command::try_from(Input::shell(format!( + "{ZONECFG} -z myzone \ + create -F -b ; \ + set zonepath=/zone/myzone" + )),) + .err() + .unwrap() + .contains("Missing brand")); + + // Missing zonepath + assert!(Command::try_from(Input::shell(format!( + "{ZONECFG} -z myzone \ + create -F -b ; \ + set brand=omicron1" + )),) + .err() + .unwrap() + .contains("Missing zonepath")); + + // Ending mid-scope + assert!(Command::try_from(Input::shell(format!( + "{ZONECFG} -z myzone \ + create -F -b ; \ + set brand=omicron1 ; \ + set zonepath=/zone/myzone ; \ + add net ; \ + set physical=oxControlService0" + )),) + .err() + .unwrap() + .contains("Cannot end zonecfg outside global scope")); + } + + #[test] + fn delete() { + let Command::Delete { name } = Command::try_from( + Input::shell(format!("{ZONECFG} -z myzone delete -F")), + ).unwrap() else { + panic!("Wrong command"); + }; + assert_eq!(name.0, "myzone"); + } +} diff --git a/helios/tokamak/src/cli/zpool.rs b/helios/tokamak/src/cli/zpool.rs new file mode 100644 index 00000000000..4b3f2fee0a7 --- /dev/null +++ b/helios/tokamak/src/cli/zpool.rs @@ -0,0 +1,120 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
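zpool.rs, whose parser follows below, is in the same position: no unit tests accompany it in this diff. As an illustrative sketch only (not part of the change, and assuming the usual `mod test { use super::*; }` convention inside zpool.rs), a test could exercise the grouped-flag and '-o' handling of `zpool list` defined later in this file:

#[test]
fn list() {
    // Flags may be grouped; '-o' consumes the following comma-separated list.
    let Command::List { properties, pools } = Command::try_from(
        Input::shell(format!("{ZPOOL} list -Hpo name,health"))
    ).unwrap() else { panic!("wrong command") };

    assert_eq!(properties, vec!["name", "health"]);
    assert!(pools.is_none());

    // Without '-Hp' the command is rejected.
    assert!(Command::try_from(Input::shell(format!("{ZPOOL} list -o name")))
        .err()
        .unwrap()
        .contains("'-Hp' flags"));
}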
+ +use crate::cli::parse::InputParser; + +use camino::Utf8PathBuf; +use helios_fusion::zpool::ZpoolName; +use helios_fusion::Input; +use helios_fusion::ZPOOL; +use std::str::FromStr; + +pub(crate) enum Command { + Create { pool: ZpoolName, vdev: Utf8PathBuf }, + Export { pool: ZpoolName }, + Import { force: bool, pool: ZpoolName }, + List { properties: Vec, pools: Option> }, + Set { property: String, value: String, pool: ZpoolName }, +} + +impl TryFrom for Command { + type Error = String; + + fn try_from(input: Input) -> Result { + if input.program != ZPOOL { + return Err(format!("Not zpool command: {}", input.program)); + } + + let mut input = InputParser::new(input); + + match input.shift_arg()?.as_str() { + "create" => { + let pool = ZpoolName::from_str(&input.shift_arg()?)?; + let vdev = Utf8PathBuf::from(input.shift_arg()?); + input.no_args_remaining()?; + Ok(Command::Create { pool, vdev }) + } + "export" => { + let pool = ZpoolName::from_str(&input.shift_arg()?)?; + input.no_args_remaining()?; + Ok(Command::Export { pool }) + } + "import" => { + let force = input.shift_arg_if("-f")?; + let pool = ZpoolName::from_str(&input.shift_arg()?)?; + Ok(Command::Import { force, pool }) + } + "list" => { + let mut scripting = false; + let mut parsable = false; + let mut properties = vec![]; + let mut pools = None; + + while !input.args().is_empty() { + let arg = input.shift_arg()?; + let mut chars = arg.chars(); + // ZFS list lets callers pass in flags in groups, or + // separately. + if let Some('-') = chars.next() { + while let Some(c) = chars.next() { + match c { + 'H' => scripting = true, + 'p' => parsable = true, + 'o' => { + if chars.next().is_some() { + return Err("-o should be immediately followed by properties".to_string()); + } + properties = input + .shift_arg()? 
+ .split(',') + .map(|s| s.to_string()) + .collect(); + } + c => { + return Err(format!( + "Unrecognized option '-{c}'" + )) + } + } + } + } else { + pools = Some(vec![ZpoolName::from_str(&arg)?]); + break; + } + } + + let remaining_pools = input.args(); + if !remaining_pools.is_empty() { + pools.get_or_insert(vec![]).extend( + remaining_pools + .into_iter() + .map(|s| ZpoolName::from_str(s)) + .collect::, String>>()?, + ) + }; + if !scripting || !parsable { + return Err("You should run 'zpool list' commands with the '-Hp' flags enabled".to_string()); + } + + if properties.is_empty() { + properties = vec!["name".to_string(), "health".to_string()]; + } + Ok(Command::List { properties, pools }) + } + "set" => { + let prop = input.shift_arg()?; + let (k, v) = prop + .split_once('=') + .ok_or_else(|| format!("Bad property: {prop}"))?; + let property = k.to_string(); + let value = v.to_string(); + + let pool = ZpoolName::from_str(&input.shift_arg()?)?; + input.no_args_remaining()?; + Ok(Command::Set { property, value, pool }) + } + command => return Err(format!("Unexpected command: {command}")), + } + } +} diff --git a/helios/tokamak/src/executor.rs b/helios/tokamak/src/executor.rs index 888d900a220..6a5da5434cd 100644 --- a/helios/tokamak/src/executor.rs +++ b/helios/tokamak/src/executor.rs @@ -145,8 +145,9 @@ impl Executor for FakeExecutor { ) -> Result { let id = self.inner.counter.fetch_add(1, Ordering::SeqCst); log_input(&self.inner.log, id, command); - - Ok(FakeChild::new(id, command, self.inner.clone())) + let mut child = FakeChild::new(id, command, self.inner.clone()); + self.inner.spawn_handler.lock().unwrap()(&mut child); + Ok(child) } } @@ -194,19 +195,29 @@ impl FakeChild { pub fn command(&self) -> &Command { &self.command } + + pub fn stdin(&self) -> &SharedByteQueue { + &self.stdin + } + pub fn stdout(&self) -> &SharedByteQueue { + &self.stdout + } + pub fn stderr(&self) -> &SharedByteQueue { + &self.stderr + } } impl Child for FakeChild { fn take_stdin(&mut self) -> Option> { - Some(Box::new(self.stdin.clone())) + Some(Box::new(self.stdin.take_writer())) } fn take_stdout(&mut self) -> Option> { - Some(Box::new(self.stdout.clone())) + Some(Box::new(self.stdout.take_reader())) } fn take_stderr(&mut self) -> Option> { - Some(Box::new(self.stderr.clone())) + Some(Box::new(self.stderr.take_reader())) } fn id(&self) -> u32 { diff --git a/helios/tokamak/src/host/datasets.rs b/helios/tokamak/src/host/datasets.rs new file mode 100644 index 00000000000..d7600c18ea8 --- /dev/null +++ b/helios/tokamak/src/host/datasets.rs @@ -0,0 +1,747 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Emulates datasets + +use crate::types::dataset; + +use camino::Utf8PathBuf; +use helios_fusion::zpool::ZpoolName; +use petgraph::stable_graph::{StableGraph, WalkNeighbors}; +use std::collections::HashMap; + +pub(crate) struct DatasetProperties(HashMap); + +impl DatasetProperties { + pub(crate) fn get( + &self, + property: dataset::Property, + ) -> Result { + self.0 + .get(&property) + .map(|k| k.to_string()) + .ok_or_else(|| format!("Missing '{property}' property")) + } + + fn insert>(&mut self, k: dataset::Property, v: V) { + self.0.insert(k, v.into()); + } +} + +pub(crate) struct FakeDataset { + idx: NodeIndex, + properties: DatasetProperties, + ty: dataset::Type, +} + +impl FakeDataset { + fn new( + idx: NodeIndex, + properties: HashMap, + ty: dataset::Type, + ) -> Self { + Self { idx, properties: DatasetProperties(properties), ty } + } + + pub fn properties(&self) -> &DatasetProperties { + &self.properties + } + + pub fn ty(&self) -> dataset::Type { + self.ty + } + + fn mounted(&self) -> bool { + self.properties + .get(dataset::Property::Mounted) + .map(|s| s == "yes") + .unwrap_or(false) + } + + fn mountpoint(&self) -> Option { + self.properties + .get(dataset::Property::Mountpoint) + .map(|s| Utf8PathBuf::from(s)) + .ok() + } + + fn mount(&mut self, load_keys: bool) -> Result<(), String> { + let properties = &mut self.properties; + + match self.ty { + dataset::Type::Filesystem => { + if properties.get(dataset::Property::Mounted)? != "no" { + return Err("Already mounted".to_string()); + } + } + _ => return Err("Not a filesystem".to_string()), + }; + + let encryption_property = + properties.get(dataset::Property::Encryption)?; + let encrypted = match encryption_property.as_str() { + "off" => false, + "aes-256-gcm" => true, + _ => { + return Err(format!( + "Unsupported encryption: {encryption_property}" + )) + } + }; + + if encrypted { + if !load_keys { + return Err(format!("Use 'zfs mount -l'")); + } + let keylocation_property = + properties.get(dataset::Property::Keylocation)?; + + // NOTE: This doesn't yet support reading from stdin, but it could. + let Some(keylocation) = keylocation_property.strip_prefix("file://").map(|k| Utf8PathBuf::from(k)) else { + return Err(format!("Cannot read from key location: {keylocation_property}")); + }; + if !keylocation.exists() { + return Err(format!("Key at {keylocation} does not exist")); + } + + let keyformat = properties.get(dataset::Property::Keyformat)?; + if keyformat != "raw" { + return Err(format!("Unknown keyformat: {keyformat}")); + } + } + + let mountpoint = properties.get(dataset::Property::Mountpoint)?; + if !mountpoint.starts_with('/') { + return Err(format!("Cannot mount with mountpoint: {mountpoint}")); + } + + properties.insert(dataset::Property::Mounted, "yes"); + Ok(()) + } + + // TODO: Confirm that the filesystem isn't used by zones? + fn unmount(&mut self) -> Result<(), String> { + let mounted = match &mut self.ty { + dataset::Type::Filesystem => { + self.properties.get(dataset::Property::Mounted)? 
+ } + _ => return Err("Not a filesystem".to_string()), + }; + if mounted == "no" { + return Err("Filesystem is not mounted".to_string()); + } + self.properties.insert(dataset::Property::Mounted, "no"); + Ok(()) + } +} + +pub(crate) enum DatasetNode { + Root, + Dataset(dataset::Name), +} + +impl DatasetNode { + pub fn to_string(&self) -> String { + match self { + DatasetNode::Root => "/".to_string(), + DatasetNode::Dataset(name) => name.as_str().to_string(), + } + } + + pub fn dataset_name(&self) -> Option<&dataset::Name> { + match self { + DatasetNode::Root => None, + DatasetNode::Dataset(name) => Some(name), + } + } +} + +pub(crate) enum DatasetInsert { + // Used to create datasets that correspond with zpools. + // + // These datasets do not required parents, and may be directly + // attached to the root of the dataset DAG. + WithoutParent(ZpoolName), + + // Used to create datasets "the normal way", where the name should imply a + // parent dataset which already exists in the dataset DAG. + WithParent(dataset::Name), +} + +impl DatasetInsert { + fn name(&self) -> dataset::Name { + use DatasetInsert::*; + match self { + WithoutParent(zpool) => dataset::Name::new(zpool.to_string()) + .expect("Zpool names should be valid datasets"), + WithParent(name) => name.clone(), + } + } +} + +/// The type of an index used to lookup nodes in the DAG. +pub(crate) type NodeIndex = + petgraph::graph::NodeIndex; + +/// Describes access to zpools and datasets that exist within the system. +/// +/// On Helios, datasets exist as children of zpools, within a DAG structure. +/// Understanding the relationship between these datasets is important to +/// accurately emulate many ZFS operations, such as deletion, which can +/// conditionally succeed or fail depending on the prescence of children. +pub(crate) struct Datasets { + // Describes the connectivity between nodes + dataset_graph: StableGraph, + dataset_graph_root: NodeIndex, + + // Individual nodes themselves + datasets: HashMap, +} + +impl Datasets { + pub(crate) fn new() -> Self { + let mut dataset_graph = StableGraph::new(); + let dataset_graph_root = dataset_graph.add_node(DatasetNode::Root); + Self { dataset_graph, dataset_graph_root, datasets: HashMap::new() } + } + + pub fn get_dataset(&self, name: &dataset::Name) -> Option<&FakeDataset> { + self.datasets.get(name) + } + + pub fn get_dataset_mut( + &mut self, + name: &dataset::Name, + ) -> Option<&mut FakeDataset> { + self.datasets.get_mut(name) + } + + /// Returns the index of the "root" of the DatasetNode DAG. + /// + /// This node does not actually exist, but the children of this node should + /// be datasets from zpools. + pub fn root_index(&self) -> NodeIndex { + self.dataset_graph_root + } + + /// Returns the node index of a dataset + pub fn index_of(&self, name: &str) -> Result { + if let Some(dataset) = self.datasets.get(&dataset::Name::new(name)?) 
{ + return Ok(dataset.idx); + } + Err(format!("{} not found", name)) + } + + /// Looks up a DatasetNode by an index + pub fn lookup_by_index(&self, index: NodeIndex) -> Option<&DatasetNode> { + self.dataset_graph.node_weight(index) + } + + pub fn children( + &self, + idx: NodeIndex, + ) -> impl Iterator + '_ { + self.dataset_graph + .neighbors_directed(idx, petgraph::Direction::Outgoing) + } + + pub fn children_mut( + &self, + idx: NodeIndex, + ) -> WalkNeighbors { + self.dataset_graph + .neighbors_directed(idx, petgraph::Direction::Outgoing) + .detach() + } + + pub fn add_dataset( + &mut self, + insert: DatasetInsert, + mut properties: HashMap, + ty: dataset::Type, + ) -> Result<(), String> { + for property in properties.keys() { + if !property.target().contains(ty.into()) { + return Err(format!( + "Cannot create {ty} with property {property}" + )); + } + } + + let name = insert.name(); + if self.datasets.contains_key(&name) { + return Err(format!( + "Cannot create '{}': already exists", + name.as_str() + )); + } + + properties.insert(dataset::Property::Type, ty.to_string()); + properties.insert(dataset::Property::Name, name.to_string()); + properties + .entry(dataset::Property::Encryption) + .or_insert("off".to_string()); + properties.entry(dataset::Property::Zoned).or_insert("off".to_string()); + + match &ty { + dataset::Type::Filesystem => { + properties + .entry(dataset::Property::Atime) + .or_insert("on".to_string()); + properties.insert(dataset::Property::Mounted, "no".to_string()); + properties + .entry(dataset::Property::Mountpoint) + .or_insert("none".to_string()); + } + dataset::Type::Volume => (), + dataset::Type::Snapshot => (), + } + + let (parent, parent_idx) = match insert { + DatasetInsert::WithoutParent(_) => { + if ty != dataset::Type::Filesystem { + return Err(format!("Cannot create '{name}' as anything other than a filesystem")); + } + ("/", self.root_index()) + } + DatasetInsert::WithParent(_) => { + let parent = if let Some((parent, _)) = + name.as_str().rsplit_once('/') + { + parent + } else { + return Err(format!("Cannot create '{}': No parent dataset. 
Try creating one under an existing filesystem or zpool?", name.as_str())); + }; + + let parent_idx = self.index_of(parent)?; + + (parent, parent_idx) + } + }; + if !self.dataset_graph.contains_node(parent_idx) { + return Err(format!( + "Cannot create fs '{}': Missing parent node: {}", + name.as_str(), + parent + )); + } + let idx = + self.dataset_graph.add_node(DatasetNode::Dataset(name.clone())); + self.dataset_graph.add_edge(parent_idx, idx, ()); + self.datasets.insert(name, FakeDataset::new(idx, properties, ty)); + + Ok(()) + } + + pub fn mount( + &mut self, + load_keys: bool, + name: &dataset::Name, + ) -> Result<(), String> { + let dataset = self + .datasets + .get_mut(&name) + .ok_or_else(|| format!("Cannot mount '{name}': Does not exist"))?; + + dataset + .mount(load_keys) + .map_err(|err| format!("Cannot mount '{name}': {err}"))?; + + Ok(()) + } + + pub fn unmount(&mut self, name: &dataset::Name) -> Result<(), String> { + let dataset = self.datasets.get_mut(&name).ok_or_else(|| { + format!("Cannot unmount '{name}': Does not exist") + })?; + dataset + .unmount() + .map_err(|err| format!("Cannot unmount '{name}': {err}"))?; + Ok(()) + } + + pub fn destroy( + &mut self, + name: &dataset::Name, + // TODO: Not emulating this option + _recusive_dependents: bool, + recursive_children: bool, + force_unmount: bool, + ) -> Result<(), String> { + let dataset = self.datasets.get_mut(&name).ok_or_else(|| { + format!("Cannot destroy '{name}': Does not exist") + })?; + + if dataset.mounted() { + if !force_unmount { + return Err(format!("Cannot destroy '{name}': Mounted")); + } + dataset.unmount()?; + } + + let idx = dataset.idx; + + let mut children = self.children_mut(idx); + while let Some(child_idx) = children.next_node(&self.dataset_graph) { + let child_name = self + .lookup_by_index(child_idx) + .map(|n| n.to_string()) + .ok_or_else(|| format!("Child node missing name"))?; + let child_name = dataset::Name::new(child_name)?; + + if !recursive_children { + return Err(format!("Cannot delete dataset {name}: has children (e.g.: {child_name})")); + } + self.destroy( + &child_name, + _recusive_dependents, + recursive_children, + force_unmount, + )?; + } + self.dataset_graph.remove_node(idx); + self.datasets.remove(&name); + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use std::str::FromStr; + use uuid::Uuid; + + fn expect_err>( + result: Result, + expected: S, + ) -> Result<(), String> { + let expected: String = expected.into(); + let errmsg = result.err().unwrap(); + if !errmsg.contains(&expected) { + return Err(format!( + "Bad error: Expected: '{expected}', but saw '{errmsg}'" + )); + } + Ok(()) + } + + #[test] + fn create_dataset_tree() { + let mut datasets = Datasets::new(); + assert_eq!(None, datasets.children(datasets.root_index()).next()); + + let id = Uuid::new_v4(); + let zpool = format!("oxp_{id}"); + + // Create a filesystem for a fake zpool + + let zpool_dataset = dataset::Name::new(&zpool).unwrap(); + let zpool = ZpoolName::from_str(&zpool).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithoutParent(zpool.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add dataset"); + + // Create a dataset as a child of that fake zpool filesystem + + let dataset_a = + dataset::Name::new(format!("{zpool}/dataset_a")).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithParent(dataset_a.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add datasets"); + + // Create a child of the previous child + + let 
dataset_b = + dataset::Name::new(format!("{dataset_a}/dataset_b")).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithParent(dataset_b.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add datasets"); + + let dataset_c = + dataset::Name::new(format!("{zpool}/dataset_c")).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithParent(dataset_c.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add datasets"); + + // The layout should look like the following: + // + // oxp_ + // oxp_/dataset_a + // oxp_/dataset_a/dataset_b + // oxp_/dataset_c + + let z = datasets.get_dataset(&zpool_dataset).unwrap(); + let a = datasets.get_dataset(&dataset_a).unwrap(); + let b = datasets.get_dataset(&dataset_b).unwrap(); + let c = datasets.get_dataset(&dataset_c).unwrap(); + assert_eq!(z.ty, dataset::Type::Filesystem); + assert_eq!(a.ty, dataset::Type::Filesystem); + assert_eq!(b.ty, dataset::Type::Filesystem); + assert_eq!(c.ty, dataset::Type::Filesystem); + + // Root of datasets + let mut children = datasets.children(datasets.root_index()); + assert_eq!(Some(z.idx), children.next()); + assert_eq!(None, children.next()); + + // Zpool -> Datasets + let mut children = datasets.children(z.idx); + assert_eq!(Some(c.idx), children.next()); + assert_eq!(Some(a.idx), children.next()); + assert_eq!(None, children.next()); + + // Dataset with children + let mut children = datasets.children(a.idx); + assert_eq!(Some(b.idx), children.next()); + assert_eq!(None, children.next()); + + // Leaf nodes + assert_eq!(None, datasets.children(b.idx).next()); + assert_eq!(None, datasets.children(c.idx).next()); + } + + #[test] + fn filesystem_properties() { + let mut datasets = Datasets::new(); + + let id = Uuid::new_v4(); + let zpool_str_name = format!("oxp_{id}"); + + // Create a filesystem for a fake zpool + + let zpool_dataset = dataset::Name::new(&zpool_str_name).unwrap(); + let zpool = ZpoolName::from_str(&zpool_str_name).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithoutParent(zpool.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add dataset"); + + let d = datasets.get_dataset(&zpool_dataset).unwrap(); + use dataset::Property::*; + assert_eq!("on", d.properties.get(Atime).unwrap()); + assert_eq!("off", d.properties.get(Encryption).unwrap()); + assert_eq!("no", d.properties.get(Mounted).unwrap()); + assert_eq!("none", d.properties.get(Mountpoint).unwrap()); + assert_eq!(zpool_str_name, d.properties.get(Name).unwrap()); + assert_eq!("filesystem", d.properties.get(Type).unwrap()); + assert_eq!("off", d.properties.get(Zoned).unwrap()); + } + + #[test] + fn filesystem_mount() { + let mut datasets = Datasets::new(); + + let id = Uuid::new_v4(); + let zpool_str_name = format!("oxp_{id}"); + + // Create a filesystem for a fake zpool + + let zpool_dataset = dataset::Name::new(&zpool_str_name).unwrap(); + let zpool = ZpoolName::from_str(&zpool_str_name).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithoutParent(zpool.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add dataset"); + + let d = datasets.get_dataset(&zpool_dataset).unwrap(); + use dataset::Property::*; + assert_eq!("no", d.properties.get(Mounted).unwrap()); + assert_eq!("none", d.properties.get(Mountpoint).unwrap()); + drop(d); + + // Try to mount using the "none" mountpoint + let load_keys = false; + expect_err( + datasets.mount(load_keys, &zpool_dataset), + "Cannot mount with mountpoint: none", + ) + .unwrap(); 
+ + // Update the mountpoint, try again + let d = datasets.get_dataset_mut(&zpool_dataset).unwrap(); + d.properties.insert(Mountpoint, "/foobar"); + drop(d); + + // We can mount it successfully + datasets.mount(load_keys, &zpool_dataset).unwrap(); + + // Re-mounting returns an error + expect_err( + datasets.mount(load_keys, &zpool_dataset), + "Already mounted", + ) + .unwrap(); + + // We can unmount successfully + datasets.unmount(&zpool_dataset).unwrap(); + + // Re-unmounting returns an error + expect_err( + datasets.unmount(&zpool_dataset), + "Filesystem is not mounted", + ) + .unwrap(); + } + + #[test] + fn invalid_dataset_insertion() { + let mut datasets = Datasets::new(); + + let id = Uuid::new_v4(); + let zpool_str_name = format!("oxp_{id}"); + let zpool = ZpoolName::from_str(&zpool_str_name).unwrap(); + + // Invalid property (meant for volume) + expect_err( + datasets.add_dataset( + DatasetInsert::WithoutParent(zpool.clone()), + HashMap::from([( + dataset::Property::Volsize, + "10G".to_string(), + )]), + dataset::Type::Filesystem, + ), + "Cannot create filesystem with property volsize", + ) + .unwrap(); + + // Cannot create volume for "without parent" + expect_err( + datasets.add_dataset( + DatasetInsert::WithoutParent(zpool.clone()), + HashMap::new(), + dataset::Type::Volume, + ), + format!("Cannot create '{zpool_str_name}' as anything other than a filesystem"), + ).unwrap(); + + // Cannot create filesystem "WithParent" that does not exist + expect_err( + datasets.add_dataset( + DatasetInsert::WithParent( + dataset::Name::new("mydataset").unwrap(), + ), + HashMap::new(), + dataset::Type::Filesystem, + ), + format!("No parent dataset"), + ) + .unwrap(); + + expect_err( + datasets.add_dataset( + DatasetInsert::WithParent( + dataset::Name::new("mydataset/nested").unwrap(), + ), + HashMap::new(), + dataset::Type::Filesystem, + ), + format!("mydataset not found"), + ) + .unwrap(); + } + + #[test] + fn destroy_datasets() { + let mut datasets = Datasets::new(); + assert_eq!(None, datasets.children(datasets.root_index()).next()); + + let id = Uuid::new_v4(); + let zpool = format!("oxp_{id}"); + + // Create a filesystem for a fake zpool + + let zpool_dataset = dataset::Name::new(&zpool).unwrap(); + let zpool = ZpoolName::from_str(&zpool).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithoutParent(zpool.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add dataset"); + + let dataset_a = + dataset::Name::new(format!("{zpool}/dataset_a")).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithParent(dataset_a.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add datasets"); + + let dataset_b = + dataset::Name::new(format!("{dataset_a}/dataset_b")).unwrap(); + datasets + .add_dataset( + DatasetInsert::WithParent(dataset_b.clone()), + HashMap::new(), + dataset::Type::Filesystem, + ) + .expect("Failed to add datasets"); + + // Cannot destroy dataset with children + + let recusive_dependents = true; + let recursive_children = false; + let force_unmount = false; + expect_err( + datasets.destroy( + &dataset_a, + recusive_dependents, + recursive_children, + force_unmount, + ), + &format!("has children"), + ) + .unwrap(); + + // The datasets still exist + datasets.get_dataset(&zpool_dataset).unwrap(); + datasets.get_dataset(&dataset_a).unwrap(); + datasets.get_dataset(&dataset_b).unwrap(); + + // Try with the recursive children option + + let recursive_children = true; + datasets + .destroy( + &dataset_a, + recusive_dependents, 
+ recursive_children, + force_unmount, + ) + .unwrap(); + + // The destroyed datasets are gone + + datasets.get_dataset(&zpool_dataset).unwrap(); + assert!(datasets.get_dataset(&dataset_a).is_none()); + assert!(datasets.get_dataset(&dataset_b).is_none()); + } +} diff --git a/helios/tokamak/src/host/mod.rs b/helios/tokamak/src/host/mod.rs new file mode 100644 index 00000000000..c8e247f960a --- /dev/null +++ b/helios/tokamak/src/host/mod.rs @@ -0,0 +1,1014 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Emulates an illumos system + +// TODO: REMOVE +#![allow(dead_code)] + +use crate::types::dataset; +use crate::{FakeChild, FakeExecutor, FakeExecutorBuilder}; + +use camino::Utf8PathBuf; +use helios_fusion::interfaces::libc; +use helios_fusion::interfaces::swapctl; +use helios_fusion::zpool::ZpoolName; +use helios_fusion::{Child, Input, Output, OutputExt}; +use ipnetwork::IpNetwork; +use slog::Logger; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::io::Read; +use std::sync::{Arc, Mutex}; + +mod datasets; +mod zpools; + +use datasets::{DatasetInsert, Datasets}; +use zpools::{FakeZpool, Zpools}; + +pub enum LinkType { + Etherstub, + Vnic, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct LinkName(pub String); +struct Link { + pub ty: LinkType, + pub parent: Option, + pub properties: HashMap, +} + +pub struct IpInterfaceName(pub String); +pub struct IpInterface {} + +pub enum RouteDestination { + Default, + Literal(IpNetwork), +} + +pub struct Route { + pub destination: RouteDestination, + pub gateway: IpNetwork, +} + +#[derive(Debug)] +pub struct ServiceName(pub String); + +pub struct Service { + pub state: smf::SmfState, + pub properties: HashMap, +} + +struct ZoneEnvironment { + id: u64, + links: HashMap, + ip_interfaces: HashMap, + routes: Vec, + services: HashMap, +} + +impl ZoneEnvironment { + fn new(id: u64) -> Self { + Self { + id, + links: HashMap::new(), + ip_interfaces: HashMap::new(), + routes: vec![], + services: HashMap::new(), + } + } +} + +#[derive(Clone, Debug)] +pub struct ZoneName(pub String); + +pub struct ZoneConfig { + pub state: zone::State, + pub brand: String, + // zonepath + pub path: Utf8PathBuf, + pub datasets: Vec, + pub devices: Vec, + pub nets: Vec, + pub fs: Vec, + // E.g. zone image, overlays, etc. + pub layers: Vec, +} + +struct Zone { + config: ZoneConfig, + environment: ZoneEnvironment, +} + +// A context parameter which is passed between subcommands. +// +// Mostly used to simplify argument passing. +struct ProcessContext<'a> { + host: &'a Arc>, + child: &'a mut FakeChild, +} + +impl<'a> ProcessContext<'a> { + fn new( + host: &'a Arc>, + child: &'a mut FakeChild, + ) -> Self { + Self { host, child } + } + + // Spawns a thread which waits for stdin to be fully written, then executes + // a user-supplied function. + fn read_all_stdin_and_then< + F: FnOnce(Vec) -> Output + Send + 'static, + >( + &self, + f: F, + ) -> ProcessState { + let mut stdin = self.child.stdin().take_reader(); + ProcessState::Executing(std::thread::spawn(move || { + let mut buf = Vec::new(); + if let Err(err) = stdin.read_to_end(&mut buf) { + return Output::failure() + .set_stderr(format!("Cannot read from stdin: {err}")); + } + + f(buf) + })) + } +} + +// A "process", which is either currently executing or completed. 
+// +// It's up to the caller to check-in on an "executing" process +// by calling "wait" on it. +enum ProcessState { + Executing(std::thread::JoinHandle), + Completed(Output), +} + +impl ProcessState { + fn wait(self) -> Output { + match self { + ProcessState::Executing(handle) => { + handle.join().expect("Failed to wait for spawned process") + } + ProcessState::Completed(output) => output, + } + } +} + +fn to_stderr>(s: S) -> Output { + Output::failure().set_stderr(s) +} + +struct FakeHostInner { + log: Logger, + global: ZoneEnvironment, + zones: HashMap, + + vdevs: HashSet, + datasets: Datasets, + zpools: Zpools, + swap_devices: Vec, + + processes: HashMap, +} + +impl FakeHostInner { + fn new(log: Logger) -> Self { + Self { + log, + global: ZoneEnvironment::new(0), + zones: HashMap::new(), + vdevs: HashSet::new(), + datasets: Datasets::new(), + zpools: Zpools::new(), + swap_devices: vec![], + processes: HashMap::new(), + } + } + + fn run_process( + &mut self, + context: ProcessContext<'_>, + ) -> Result { + let input = Input::from(context.child.command()); + + let cmd = crate::cli::Command::try_from(input).map_err(to_stderr)?; + // TODO: Pick the right zone, act on it. + // + // TODO: If we can, complete immediately. + // Otherwise, spawn a ProcessState::Executing thread, and grab + // whatever stuff we need from the FakeChild. + + let _with_pfexec = cmd.with_pfexec(); + let zone = (*cmd.in_zone()).clone(); + + use crate::cli::KnownCommand::*; + match cmd.as_cmd() { + Zfs(cmd) => self.run_zfs(context, cmd, zone), + Zpool(cmd) => self.run_zpool(context, cmd, zone), + _ => todo!(), + } + } + + fn run_zfs( + &mut self, + context: ProcessContext<'_>, + cmd: crate::cli::zfs::Command, + zone: Option, + ) -> Result { + use crate::cli::zfs::Command::*; + if zone.is_some() { + return Err(to_stderr("Not Supported: 'zfs' commands within zone")); + } + match cmd { + CreateFilesystem { properties, name } => { + for property in properties.keys() { + if property.access() == dataset::PropertyAccess::ReadOnly { + return Err(to_stderr( + "Not supported: {property} is a read-only property", + )); + } + } + + self.datasets + .add_dataset( + DatasetInsert::WithParent(name.clone()), + properties, + dataset::Type::Filesystem, + ) + .map_err(to_stderr)?; + + Ok(ProcessState::Completed(Output::success().set_stdout( + format!("Created {} successfully\n", name.as_str()), + ))) + } + CreateVolume { mut properties, sparse, blocksize, size, name } => { + for property in properties.keys() { + if property.access() == dataset::PropertyAccess::ReadOnly { + return Err(to_stderr( + "Not supported: {property} is a read-only property", + )); + } + } + + let blocksize = blocksize.unwrap_or(8192); + if sparse { + properties.insert( + dataset::Property::Reservation, + "0".to_string(), + ); + } else { + // NOTE: This isn't how much metadata is used, but it's + // a number we can use that represents "this is larger than + // the usable size of the volume". + // + // See: + // + // $ zfs get -Hp used,volsize,refreservation + // + // For any non-sparse zpool. 
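// Illustration (not part of the original change): for a non-sparse 1 GiB
// volume (size = 1_073_741_824 bytes) the line below reserves
// 1_073_741_824 + (8 << 20) = 1_082_130_432 bytes, i.e. the volume size
// padded by 8 MiB, which is comfortably larger than the usable size of the
// volume.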
+ let reserved_size = size + (8 << 20); + properties.insert( + dataset::Property::Reservation, + reserved_size.to_string(), + ); + } + properties.insert( + dataset::Property::Volblocksize, + blocksize.to_string(), + ); + properties.insert(dataset::Property::Volsize, size.to_string()); + + let mut keylocation = None; + let mut keysize = 0; + + for (k, v) in &properties { + match k { + dataset::Property::Keylocation => { + keylocation = Some(v.to_string()) + } + dataset::Property::Encryption => match v.as_str() { + "aes-256-gcm" => keysize = 32, + _ => { + return Err(Output::failure() + .set_stderr("Unsupported encryption")) + } + }, + _ => (), + } + } + + // Create a closure to add the dataset. + // + // We either call this immediately, or in a background thread, + // depending on whether or not we need to read a key from stdin. + let inner = context.host.clone(); + let add_dataset = move || { + let mut inner = inner.lock().unwrap(); + match inner.datasets.add_dataset( + DatasetInsert::WithParent(name), + properties, + dataset::Type::Volume, + ) { + Ok(()) => Output::success(), + Err(err) => Output::failure().set_stderr(err), + } + }; + + if keylocation.as_deref() == Some("file:///dev/stdin") { + return Ok(context.read_all_stdin_and_then(move |input| { + if input.len() != keysize { + return Output::failure().set_stderr(format!( + "Bad key length: {}", + input.len() + )); + } + add_dataset() + })); + } + Ok(ProcessState::Completed(add_dataset())) + } + Destroy { + recursive_dependents, + recursive_children, + force_unmount, + name, + } => { + self.datasets + .destroy( + &name, + recursive_dependents, + recursive_children, + force_unmount, + ) + .map_err(to_stderr)?; + + Ok(ProcessState::Completed( + Output::success().set_stdout(format!("{} destroyed", name)), + )) + } + Get { recursive, depth, fields, properties, datasets } => { + let mut targets = if let Some(datasets) = datasets { + let mut targets = VecDeque::new(); + + let depth = if recursive { depth } else { Some(0) }; + for dataset in datasets { + let zix = self + .datasets + .index_of(dataset.as_str()) + .map_err(to_stderr)?; + targets.push_back((zix, depth)); + } + targets + } else { + VecDeque::from([( + self.datasets.root_index(), + depth.map(|d| d + 1), + )]) + }; + + let mut output = String::new(); + + while let Some((target, depth)) = targets.pop_front() { + let node = self.datasets.lookup_by_index(target).expect( + "We should have looked up the dataset earlier...", + ); + + let (add_children, child_depth) = if let Some(depth) = depth + { + if depth > 0 { + (true, Some(depth - 1)) + } else { + (false, None) + } + } else { + (true, None) + }; + + if add_children { + for child in self.datasets.children(target) { + targets.push_front((child, child_depth)); + } + } + + if target == self.datasets.root_index() { + // Skip the root node, as there is nothing to + // display for it. 
+ continue; + } + + for property in &properties { + for field in &fields { + match field.as_str() { + "name" => { + output.push_str(&node.to_string()); + } + "property" => { + output.push_str(&property.to_string()); + } + "value" => { + let name = node.dataset_name().expect( + "Non-root node should have name", + ); + let dataset = self + .datasets + .get_dataset(&name) + .expect("Dataset should exist"); + let value = dataset + .properties() + .get(*property) + .map_err(|e| to_stderr(e))?; + output.push_str(&value); + } + "source" => output.push_str("???"), + f => { + return Err(to_stderr(format!( + "Unknown field: {f}" + ))) + } + } + output.push_str("\t"); + } + output.push_str("\n"); + } + } + Ok(ProcessState::Completed( + Output::success().set_stdout(output), + )) + } + List { recursive, depth, properties, datasets } => { + let mut targets = if let Some(datasets) = datasets { + let mut targets = VecDeque::new(); + + // If we explicitly request datasets, only return + // information for the exact matches, unless a + // recursive walk was requested. + let depth = if recursive { depth } else { Some(0) }; + + for dataset in datasets { + let zix = self + .datasets + .index_of(dataset.as_str()) + .map_err(to_stderr)?; + targets.push_back((zix, depth)); + } + + targets + } else { + // Bump whatever the depth was up by one, since we + // don't display anything for the root node. + VecDeque::from([( + self.datasets.root_index(), + depth.map(|d| d + 1), + )]) + }; + + let mut output = String::new(); + + while let Some((target, depth)) = targets.pop_front() { + let (add_children, child_depth) = if let Some(depth) = depth + { + if depth > 0 { + (true, Some(depth - 1)) + } else { + (false, None) + } + } else { + (true, None) + }; + + if add_children { + for child in self.datasets.children(target) { + targets.push_front((child, child_depth)); + } + } + + if target == self.datasets.root_index() { + // Skip the root node, as there is nothing to + // display for it. 
+ continue; + } + let dataset_name = self + .datasets + .lookup_by_index(target) + .expect("We should have looked up this node earlier...") + .dataset_name() + .expect("Cannot access name"); + + let dataset = self + .datasets + .get_dataset(&dataset_name) + .expect("Cannot access dataset"); + + for property in &properties { + let value = dataset + .properties() + .get(*property) + .map_err(|err| to_stderr(err))?; + + output.push_str(&value); + output.push_str("\t"); + } + output.push_str("\n"); + } + + Ok(ProcessState::Completed( + Output::success().set_stdout(output), + )) + } + Mount { load_keys, filesystem } => { + self.datasets + .mount(load_keys, &filesystem) + .map_err(to_stderr)?; + Ok(ProcessState::Completed( + Output::success() + .set_stdout(format!("{} mounted", filesystem)), + )) + } + Set { properties, name } => { + // TODO + todo!("Calling zfs set with properties: {properties:?} on '{name}', not implemented"); + } + } + } + + fn run_zpool( + &mut self, + _context: ProcessContext<'_>, + cmd: crate::cli::zpool::Command, + zone: Option, + ) -> Result { + use crate::cli::zpool::Command::*; + if zone.is_some() { + return Err(to_stderr( + "Not Supported: 'zpool' commands within zone", + )); + } + match cmd { + Create { pool, vdev } => { + if !self.vdevs.contains(&vdev) { + return Err(to_stderr(format!( + "Cannot create zpool: device '{vdev}' does not exist" + ))); + } + + let import = true; + self.zpools + .insert(pool.clone(), vdev.clone(), import) + .map_err(to_stderr)?; + + let mut dataset_properties = HashMap::new(); + dataset_properties + .insert(dataset::Property::Mountpoint, format!("/{pool}")); + self.datasets + .add_dataset( + DatasetInsert::WithoutParent(pool), + dataset_properties, + dataset::Type::Filesystem, + ) + .expect("Failed to add dataset after creating zpool"); + Ok(ProcessState::Completed(Output::success())) + } + Export { pool: name } => { + let Some(mut pool) = self.zpools.get_mut(&name) else { + return Err(to_stderr(format!("pool does not exist"))); + }; + + if !pool.imported { + return Err(to_stderr(format!( + "cannot export pool which is already exported" + ))); + } + pool.imported = false; + Ok(ProcessState::Completed(Output::success())) + } + Import { force: _, pool: name } => { + let Some(mut pool) = self.zpools.get_mut(&name) else { + return Err(to_stderr(format!("pool does not exist"))); + }; + + if pool.imported { + return Err(to_stderr(format!( + "a pool with that name is already created" + ))); + } + pool.imported = true; + Ok(ProcessState::Completed(Output::success())) + } + List { properties, pools } => { + let mut output = String::new(); + let mut display = |name: &ZpoolName, + pool: &FakeZpool, + properties: &Vec| + -> Result<(), _> { + for property in properties { + match property.as_str() { + "name" => output.push_str(&format!("{}", name)), + "health" => { + output.push_str(&pool.health.to_string()) + } + _ => { + return Err(to_stderr(format!( + "Unknown property: {property}" + ))) + } + } + output.push_str("\t"); + } + output.push_str("\n"); + Ok(()) + }; + + if let Some(pools) = pools { + for name in &pools { + let pool = self.zpools.get(name).ok_or_else(|| { + to_stderr(format!("{} does not exist", name)) + })?; + + if !pool.imported { + return Err(to_stderr(format!( + "{} not imported", + name + ))); + } + + display(&name, &pool, &properties)?; + } + } else { + for (name, pool) in self.zpools.all() { + if pool.imported { + display(&name, &pool, &properties)?; + } + } + } + + Ok(ProcessState::Completed( + 
Output::success().set_stdout(output), + )) + } + Set { property, value, pool: name } => { + let Some(pool) = self.zpools.get_mut(&name) else { + return Err(to_stderr(format!("{} does not exist", name))); + }; + pool.properties.insert(property, value); + Ok(ProcessState::Completed(Output::success())) + } + } + } + + // Handle requests from an executor to spawn a new child. + // + // We aren't acting on "self" here to allow a background thread to clone + // access to ourselves. + fn handle_spawn(inner: &Arc>, child: &mut FakeChild) { + let mut me = inner.lock().unwrap(); + + assert!( + me.processes.get(&child.id()).is_none(), + "Process is already spawned: {}", + Input::from(child.command()), + ); + + let process = match me.run_process(ProcessContext::new(inner, child)) { + Ok(process) => process, + Err(err) => ProcessState::Completed(err), + }; + me.processes.insert(child.id(), process); + } + + // Handle requests from an executor to wait for a child to complete. + // + // NOTE: This function panics if the child was not previously spawned. + fn handle_wait(&mut self, child: &mut FakeChild) -> Output { + self.processes + .remove(&child.id()) + .unwrap_or_else(|| { + panic!( + "Waiting for a child that has not been spawned: {}", + Input::from(child.command()) + ); + }) + .wait() + } +} + +pub struct FakeHost { + executor: Arc, + inner: Arc>, +} + +impl FakeHost { + pub fn new(log: Logger) -> Arc { + let inner = Arc::new(Mutex::new(FakeHostInner::new(log.clone()))); + + // Plumbing to ensure that commands through the executor act on + // "FakeHostInner", by going to an appropriate callback method. + let inner_for_spawn = inner.clone(); + let inner_for_wait = inner.clone(); + let builder = FakeExecutorBuilder::new(log) + .spawn_handler(Box::new(move |child| { + FakeHostInner::handle_spawn(&inner_for_spawn, child); + })) + .wait_handler(Box::new(move |child| { + let mut inner = inner_for_wait.lock().unwrap(); + inner.handle_wait(child) + })); + + Arc::new(Self { executor: builder.build(), inner }) + } + + fn page_size(&self) -> i64 { + 4096 + } + + pub fn add_devices(&self, vdevs: &Vec) { + let mut inner = self.inner.lock().unwrap(); + + for vdev in vdevs { + inner.vdevs.insert(vdev.clone()); + } + } +} + +impl libc::Libc for FakeHost { + fn sysconf(&self, arg: i32) -> std::io::Result { + use ::libc::_SC_PAGESIZE; + + match arg { + _SC_PAGESIZE => Ok(self.page_size()), + _ => Err(std::io::Error::new( + std::io::ErrorKind::Unsupported, + "unknown sysconf", + )), + } + } +} + +impl swapctl::Swapctl for FakeHost { + fn list_swap_devices( + &self, + ) -> Result, swapctl::Error> { + Ok(self.inner.lock().unwrap().swap_devices.clone()) + } + + fn add_swap_device( + &self, + path: String, + start: u64, + length: u64, + ) -> Result<(), swapctl::Error> { + let inner = &mut self.inner.lock().unwrap(); + + const PATH_PREFIX: &str = "/dev/zvol/dsk/"; + let volume = if let Some(volume) = path.strip_prefix(PATH_PREFIX) { + match dataset::Name::new(volume.to_string()) { + Ok(name) => name, + Err(err) => { + let msg = err.to_string(); + return Err(swapctl::Error::AddDevice { + msg, + path, + start, + length, + }); + } + } + } else { + let msg = format!("path does not start with: {PATH_PREFIX}"); + return Err(swapctl::Error::AddDevice { msg, path, start, length }); + }; + + if let Some(dataset) = inner.datasets.get_dataset(&volume) { + match dataset.ty() { + dataset::Type::Volume => (), + _ => { + let msg = format!( + "Dataset '{}' exists, but is not a volume", + volume.as_str() + ); + return 
Err(swapctl::Error::AddDevice { + msg, + path, + start, + length, + }); + } + } + } else { + let msg = format!("Volume '{}' does not exist", volume.as_str()); + return Err(swapctl::Error::AddDevice { msg, path, start, length }); + } + + if start != 0 || length != 0 { + let msg = "Try setting start = 0 and length = 0".to_string(); + return Err(swapctl::Error::AddDevice { msg, path, start, length }); + }; + + let swap_devices = &mut inner.swap_devices; + for device in &*swap_devices { + if device.path == path { + let msg = "device already used for swap".to_string(); + return Err(swapctl::Error::AddDevice { + msg, + path, + start, + length, + }); + } + } + + swap_devices.push(swapctl::SwapDevice { + path, + start, + length, + // NOTE: Using dummy values until we have a reasonable way to + // populate this info. + total_pages: 0xffff, + free_pages: 0xffff, + flags: 0xffff, + }); + Ok(()) + } +} + +impl helios_fusion::Host for FakeHost { + fn executor(&self) -> &dyn helios_fusion::Executor { + &*self.executor + } + + fn swapctl(&self) -> &dyn swapctl::Swapctl { + self + } + + fn libc(&self) -> &dyn libc::Libc { + self + } +} + +#[derive(Debug, PartialEq)] +pub enum AddrType { + Dhcp, + Static(IpNetwork), + Addrconf, +} + +#[cfg(test)] +mod test { + use super::*; + use helios_fusion::Host; + use omicron_test_utils::dev::test_setup_log; + use std::process::Command; + use uuid::Uuid; + + #[test] + fn create_zpool_creates_dataset_too() { + let logctx = test_setup_log("create_zpool_creates_dataset_too"); + let log = &logctx.log; + + let id = Uuid::new_v4(); + let zpool_name = format!("oxp_{id}"); + let vdev = "/mydevice"; + + let host = FakeHost::new(log.clone()); + host.add_devices(&vec![Utf8PathBuf::from(vdev)]); + + // Create the zpool + let output = host + .executor() + .execute(Command::new(helios_fusion::ZPOOL).args([ + "create", + &zpool_name, + vdev, + ])) + .expect("Failed to run zpool create command"); + assert!(output.status.success()); + + // Observe the ZFS filesystem exists + let output = host + .executor() + .execute(Command::new(helios_fusion::ZFS).args([ + "list", + "-Hp", + &zpool_name, + ])) + .expect("Failed to run zfs list command"); + assert!(output.status.success()); + + logctx.cleanup_successful(); + } + + #[test] + fn zfs_list_and_get() { + let logctx = test_setup_log("zfs_list"); + let log = &logctx.log; + + let id = Uuid::new_v4(); + let vdev = "/mydevice"; + + let host = FakeHost::new(log.clone()); + host.add_devices(&vec![Utf8PathBuf::from(vdev)]); + + let zpool_name = format!("oxp_{id}"); + let dataset1_name = format!("{zpool_name}/dataset_1"); + let dataset2_name = format!("{dataset1_name}/dataset_2"); + + // Create the zpool and some datasets within + let output = host + .executor() + .execute(Command::new(helios_fusion::ZPOOL).args([ + "create", + &zpool_name, + vdev, + ])) + .expect("Failed to run zpool create command"); + assert!(output.status.success()); + let output = host + .executor() + .execute( + Command::new(helios_fusion::ZFS) + .args(["create", &dataset1_name]), + ) + .expect("Failed to run zfs create command"); + assert!(output.status.success()); + let output = host + .executor() + .execute( + Command::new(helios_fusion::ZFS) + .args(["create", &dataset2_name]), + ) + .expect("Failed to run zfs create command"); + assert!(output.status.success()); + + // ZFS List: Lists all datasets + + let output = host + .executor() + .execute(Command::new(helios_fusion::ZFS).args(["list", "-Hp"])) + .expect("Failed to run zfs list command"); + 
assert!(output.status.success()); + assert_eq!( + String::from_utf8(output.stdout).unwrap(), + format!("{zpool_name}\t\n{dataset1_name}\t\n{dataset2_name}\t\n") + ); + + // We can ask for properties explicitly. + let output = host + .executor() + .execute(Command::new(helios_fusion::ZFS).args([ + "list", + "-Hpo", + "name,type", + ])) + .expect("Failed to run zfs list command"); + assert!(output.status.success()); + assert_eq!( + String::from_utf8(output.stdout).unwrap(), + format!("{zpool_name}\tfilesystem\t\n{dataset1_name}\tfilesystem\t\n{dataset2_name}\tfilesystem\t\n") + ); + + // "zfs get" also works + let output = host + .executor() + .execute(Command::new(helios_fusion::ZFS).args([ + "get", + "-Hpo", + "value", + "mountpoint", + &zpool_name, + ])) + .expect("Failed to run zfs get command"); + assert!(output.status.success()); + assert_eq!( + String::from_utf8(output.stdout).unwrap(), + format!("/{zpool_name}\t\n") + ); + + // It also allows recursive traversal + // + // This only sees output from the zpool dataset, as well as "dataset 1", but not "dataset + // 2" due to the depth restriction. + let output = host + .executor() + .execute(Command::new(helios_fusion::ZFS).args([ + "get", + "-d", + "1", + "-rHpo", + "value", + "mountpoint", + &zpool_name, + ])) + .expect("Failed to run zfs get command"); + assert!(output.status.success()); + assert_eq!( + String::from_utf8(output.stdout).unwrap(), + format!("/{zpool_name}\t\nnone\t\n") + ); + + logctx.cleanup_successful(); + } +} diff --git a/helios/tokamak/src/host/zpools.rs b/helios/tokamak/src/host/zpools.rs new file mode 100644 index 00000000000..f849c9b7754 --- /dev/null +++ b/helios/tokamak/src/host/zpools.rs @@ -0,0 +1,77 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Emulates zpools + +use camino::Utf8PathBuf; +use helios_fusion::zpool::{ZpoolHealth, ZpoolName}; +use std::collections::HashMap; + +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct FakeZpool { + name: ZpoolName, + vdev: Utf8PathBuf, + + pub imported: bool, + pub health: ZpoolHealth, + pub properties: HashMap, +} + +impl FakeZpool { + pub(crate) fn new( + name: ZpoolName, + vdev: Utf8PathBuf, + imported: bool, + ) -> Self { + Self { + name, + vdev, + imported, + health: ZpoolHealth::Online, + properties: HashMap::new(), + } + } +} + +/// Describes access to zpools that exist within the system. +pub(crate) struct Zpools { + zpools: HashMap, +} + +impl Zpools { + pub(crate) fn new() -> Self { + Self { zpools: HashMap::new() } + } + + // Zpool access methods + + pub fn get(&self, name: &ZpoolName) -> Option<&FakeZpool> { + self.zpools.get(name) + } + + pub fn get_mut(&mut self, name: &ZpoolName) -> Option<&mut FakeZpool> { + self.zpools.get_mut(name) + } + + pub fn all(&self) -> impl Iterator { + self.zpools.iter() + } + + pub fn insert( + &mut self, + name: ZpoolName, + vdev: Utf8PathBuf, + import: bool, + ) -> Result<(), String> { + if self.zpools.contains_key(&name) { + return Err(format!( + "Cannot create pool name '{name}': already exists" + )); + } + + let pool = FakeZpool::new(name.clone(), vdev, import); + self.zpools.insert(name.clone(), pool); + Ok(()) + } +} diff --git a/helios/tokamak/src/lib.rs b/helios/tokamak/src/lib.rs index 352d2763c73..ee577bd5564 100644 --- a/helios/tokamak/src/lib.rs +++ b/helios/tokamak/src/lib.rs @@ -2,7 +2,11 @@ // License, v. 2.0. 
If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +mod cli; mod executor; +mod host; mod shared_byte_queue; +pub mod types; pub use executor::*; +pub use host::FakeHost; diff --git a/helios/tokamak/src/shared_byte_queue.rs b/helios/tokamak/src/shared_byte_queue.rs index 52e91c266bc..68bd8bbc53d 100644 --- a/helios/tokamak/src/shared_byte_queue.rs +++ b/helios/tokamak/src/shared_byte_queue.rs @@ -3,26 +3,78 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use std::collections::VecDeque; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Condvar, Mutex}; + +struct ByteQueue { + bytes: VecDeque, + + reader_dropped: bool, + writer_dropped: bool, +} + +impl ByteQueue { + fn new() -> Self { + Self { + bytes: VecDeque::new(), + reader_dropped: false, + writer_dropped: false, + } + } +} + +struct SharedByteQueueInner { + byte_queue: Mutex, + + // Allows callers to block until data can be read. + cvar: Condvar, +} + +impl SharedByteQueueInner { + fn new() -> Self { + Self { byte_queue: Mutex::new(ByteQueue::new()), cvar: Condvar::new() } + } +} /// A queue of bytes that can selectively act as a reader or writer, /// which can also be cloned. /// /// This is primarily used to emulate stdin / stdout / stderr. #[derive(Clone)] -pub struct SharedByteQueue { - buf: Arc>>, -} +pub struct SharedByteQueue(Arc); impl SharedByteQueue { pub fn new() -> Self { - Self { buf: Arc::new(Mutex::new(VecDeque::new())) } + Self(Arc::new(SharedByteQueueInner::new())) + } + + pub fn take_writer(&self) -> SharedByteQueueWriter { + SharedByteQueueWriter(self.0.clone()) + } + + pub fn take_reader(&self) -> SharedByteQueueReader { + SharedByteQueueReader(self.0.clone()) + } +} + +pub struct SharedByteQueueWriter(Arc); + +impl Drop for SharedByteQueueWriter { + fn drop(&mut self) { + let mut bq = self.0.byte_queue.lock().unwrap(); + bq.writer_dropped = true; + self.0.cvar.notify_all(); } } -impl std::io::Write for SharedByteQueue { +impl std::io::Write for SharedByteQueueWriter { fn write(&mut self, buf: &[u8]) -> std::io::Result { - self.buf.lock().unwrap().write(buf) + let mut bq = self.0.byte_queue.lock().unwrap(); + if bq.reader_dropped { + return Ok(0); + } + let n = bq.bytes.write(buf)?; + self.0.cvar.notify_all(); + Ok(n) } fn flush(&mut self) -> std::io::Result<()> { @@ -30,8 +82,71 @@ impl std::io::Write for SharedByteQueue { } } -impl std::io::Read for SharedByteQueue { +pub struct SharedByteQueueReader(Arc); + +impl std::io::Read for SharedByteQueueReader { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - self.buf.lock().unwrap().read(buf) + let mut bq = self.0.byte_queue.lock().unwrap(); + + loop { + let n = bq.bytes.read(buf)?; + if n > 0 { + return Ok(n); + } + if bq.writer_dropped { + return Ok(0); + } + + bq = self + .0 + .cvar + .wait_while(bq, |bq| !bq.writer_dropped && bq.bytes.is_empty()) + .unwrap(); + } + } +} + +impl Drop for SharedByteQueueReader { + fn drop(&mut self) { + let mut bq = self.0.byte_queue.lock().unwrap(); + bq.reader_dropped = true; + self.0.cvar.notify_all(); + } +} + +#[cfg(test)] +mod test { + use super::*; + use std::io::{Read, Write}; + + #[test] + fn blocking_reader() { + let bq = SharedByteQueue::new(); + + let mut reader = bq.take_reader(); + let mut writer = bq.take_writer(); + + // This represents our "Process", which could be reading a collection + // of bytes until stdin completes. 
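// Note (illustrative, not part of the change): read_to_end only returns once
// the writer half is dropped; each write_all below wakes the reader through
// the Condvar, but the reader reports EOF (Ok(0)) only when the queue is
// empty and writer_dropped has been set.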
+ let handle = std::thread::spawn(move || { + let mut buf = Vec::new(); + reader.read_to_end(&mut buf).expect("Failed to read"); + buf + }); + + // This represents someone interacting with the process, dumping to + // stdin. + let input1 = b"What you're referring to as bytes,\n"; + let input2 = b"is in fact, bytes/Vec<u8>"; + let input = [input1.as_slice(), input2.as_slice()].concat(); + + // Write all the bytes, observe that the reader doesn't exit early. + writer.write_all(input1).expect("Failed to write"); + std::thread::sleep(std::time::Duration::from_millis(10)); + writer.write_all(input2).expect("Failed to write"); + drop(writer); + + let output = handle.join().unwrap(); + assert_eq!(input, output.as_slice()); } } diff --git a/helios/tokamak/src/types/dataset.rs b/helios/tokamak/src/types/dataset.rs new file mode 100644 index 00000000000..17160b0d6c8 --- /dev/null +++ b/helios/tokamak/src/types/dataset.rs @@ -0,0 +1,169 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::fmt; + +/// The name of a ZFS filesystem, volume, or snapshot +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct Name(String); + +impl Name { + pub fn new<S: Into<String>>(s: S) -> Result<Self, String> { + let s: String = s.into(); + if s.is_empty() { + return Err("Invalid name: Empty string".to_string()); + } + if s.ends_with('/') { + return Err(format!("Invalid name {s}: trailing slash in name")); + } + + Ok(Self(s)) + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl fmt::Display for Name { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +#[derive( + strum::Display, + strum::EnumString, + strum::IntoStaticStr, + Copy, + Clone, + Debug, + PartialEq, + Eq, + Hash, +)] +#[strum(use_phf, serialize_all = "lowercase")] +pub(crate) enum Type { + Filesystem, + Snapshot, + Volume, +} + +bitflags::bitflags! { + /// The classes of datasets for which a property is valid.
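+    ///
+    /// For example, `mountpoint` is only valid for filesystems and `volsize`
+    /// only for volumes, while `available` applies to all three classes.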
+ pub(crate) struct PropertyTarget: u8 { + const FILESYSTEM = 0b0001; + const SNAPSHOT = 0b0010; + const VOLUME = 0b0100; + } +} + +impl From for PropertyTarget { + fn from(ty: Type) -> Self { + use Type::*; + match ty { + Filesystem => PropertyTarget::FILESYSTEM, + Snapshot => PropertyTarget::SNAPSHOT, + Volume => PropertyTarget::VOLUME, + } + } +} + +/// The ability of users to modify properties +#[derive(Eq, PartialEq)] +pub(crate) enum PropertyAccess { + ReadOnly, + ReadWrite, +} + +/// A property which is applicable to datasets +#[derive( + strum::Display, + strum::EnumString, + strum::IntoStaticStr, + Copy, + Clone, + Debug, + PartialEq, + Eq, + Hash, +)] +#[strum(use_phf, serialize_all = "lowercase")] +pub(crate) enum Property { + Atime, + #[strum(serialize = "available", serialize = "avail")] + Available, + Encryption, + Logbias, + Mounted, + Mountpoint, + Name, + Keyformat, + Keylocation, + #[strum(serialize = "oxide:epoch")] + OxideEpoch, + Primarycache, + #[strum(serialize = "reservation", serialize = "refreservation")] + Reservation, + Secondarycache, + Type, + Volblocksize, + Volsize, + Zoned, +} + +impl Property { + pub fn access(&self) -> PropertyAccess { + use Property::*; + use PropertyAccess::*; + + match self { + Atime => ReadWrite, + Available => ReadOnly, + Encryption => ReadWrite, + Logbias => ReadWrite, + Mounted => ReadOnly, + Mountpoint => ReadWrite, + Name => ReadOnly, + Keyformat => ReadWrite, + Keylocation => ReadWrite, + OxideEpoch => ReadWrite, + Primarycache => ReadWrite, + Reservation => ReadOnly, + Secondarycache => ReadWrite, + Type => ReadOnly, + Volblocksize => ReadOnly, + Volsize => ReadOnly, + Zoned => ReadWrite, + } + } + + pub fn target(&self) -> PropertyTarget { + let fs = PropertyTarget::FILESYSTEM; + let all = PropertyTarget::all(); + let fs_and_vol = PropertyTarget::FILESYSTEM | PropertyTarget::VOLUME; + let vol = PropertyTarget::VOLUME; + + use Property::*; + match self { + Atime => fs, + Available => all, + Encryption => all, + Logbias => fs_and_vol, + Mounted => fs, + Mountpoint => fs, + Name => all, + Keyformat => fs_and_vol, + Keylocation => fs_and_vol, + OxideEpoch => all, + Primarycache => fs_and_vol, + Reservation => vol, + Secondarycache => fs_and_vol, + Type => all, + Volblocksize => vol, + Volsize => vol, + Zoned => all, + } + } +} diff --git a/helios/tokamak/src/types/mod.rs b/helios/tokamak/src/types/mod.rs new file mode 100644 index 00000000000..49b9a778e7d --- /dev/null +++ b/helios/tokamak/src/types/mod.rs @@ -0,0 +1,7 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Types which may be parsed from the CLI and used by a fake host + +pub mod dataset; diff --git a/illumos-utils/src/coreadm.rs b/illumos-utils/src/coreadm.rs index 543dbca239c..aed3e11786f 100644 --- a/illumos-utils/src/coreadm.rs +++ b/illumos-utils/src/coreadm.rs @@ -1,4 +1,5 @@ use camino::Utf8PathBuf; +use helios_fusion::COREADM; use std::ffi::OsString; use std::os::unix::ffi::OsStringExt; use std::process::Command; @@ -21,8 +22,6 @@ pub enum CoreAdmError { Exec(std::io::Error), } -const COREADM: &str = "/usr/bin/coreadm"; - pub fn coreadm(core_dir: &Utf8PathBuf) -> Result<(), CoreAdmError> { let mut cmd = Command::new(COREADM); cmd.env_clear(); diff --git a/illumos-utils/src/dladm.rs b/illumos-utils/src/dladm.rs index e42add426a3..1c732a7645b 100644 --- a/illumos-utils/src/dladm.rs +++ b/illumos-utils/src/dladm.rs @@ -13,6 +13,8 @@ use serde::{Deserialize, Serialize}; use std::str::FromStr; use std::str::Utf8Error; +pub use helios_fusion::DLADM; + pub const VNIC_PREFIX: &str = "ox"; pub const VNIC_PREFIX_CONTROL: &str = "oxControl"; pub const VNIC_PREFIX_BOOTSTRAP: &str = "oxBootstrap"; @@ -22,9 +24,6 @@ pub const VNIC_PREFIX_BOOTSTRAP: &str = "oxBootstrap"; // Viona, and thus plumbed directly to guests. pub const VNIC_PREFIX_GUEST: &str = "vopte"; -/// Path to the DLADM command. -pub const DLADM: &str = "/usr/sbin/dladm"; - /// The name of the etherstub to be created for the underlay network. pub const UNDERLAY_ETHERSTUB_NAME: &str = "underlay_stub0"; diff --git a/illumos-utils/src/dumpadm.rs b/illumos-utils/src/dumpadm.rs index fb882f72e1b..2e8a69afd6c 100644 --- a/illumos-utils/src/dumpadm.rs +++ b/illumos-utils/src/dumpadm.rs @@ -2,10 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use helios_fusion::{BoxedExecutor, ExecutionError}; - use byteorder::{LittleEndian, ReadBytesExt}; use camino::Utf8PathBuf; +use helios_fusion::{BoxedExecutor, ExecutionError, DUMPADM, SAVECORE}; use std::ffi::OsString; use std::fs::File; use std::io::{Seek, SeekFrom}; @@ -81,9 +80,6 @@ pub fn dump_flag_is_valid( Ok((flags & DF_VALID) != 0) } -const DUMPADM: &str = "/usr/sbin/dumpadm"; -const SAVECORE: &str = "/usr/bin/savecore"; - #[derive(thiserror::Error, Debug)] pub enum DumpAdmError { #[error("Error obtaining or modifying dump configuration. dump_slice: {dump_slice}, savecore_dir: {savecore_dir:?}")] diff --git a/illumos-utils/src/fstyp.rs b/illumos-utils/src/fstyp.rs index 41d0317f1dc..2cb67a538ca 100644 --- a/illumos-utils/src/fstyp.rs +++ b/illumos-utils/src/fstyp.rs @@ -4,13 +4,12 @@ //! Helper for calling fstyp. -use crate::zpool::ZpoolName; use camino::Utf8Path; -use helios_fusion::{BoxedExecutor, ExecutionError, PFEXEC}; +use helios_fusion::{ + zpool::ZpoolName, BoxedExecutor, ExecutionError, FSTYP, PFEXEC, +}; use std::str::FromStr; -const FSTYP: &str = "/usr/sbin/fstyp"; - #[derive(thiserror::Error, Debug)] pub enum Error { #[error("fstype output is not valid UTF-8: {0}")] diff --git a/illumos-utils/src/lib.rs b/illumos-utils/src/lib.rs index d1375224ccc..40da4231566 100644 --- a/illumos-utils/src/lib.rs +++ b/illumos-utils/src/lib.rs @@ -4,7 +4,6 @@ //! Wrappers around illumos-specific commands. -pub mod addrobj; pub mod coreadm; pub mod destructor; pub mod dkio; diff --git a/illumos-utils/src/libc.rs b/illumos-utils/src/libc.rs index 02d8c613307..a307a027668 100644 --- a/illumos-utils/src/libc.rs +++ b/illumos-utils/src/libc.rs @@ -4,6 +4,8 @@ //! 
Miscellaneous FFI wrapper functions for libc +// TODO: Mark deprecated in favor of helios_fusion interface + /// sysconf(3c) pub fn sysconf(arg: i32) -> std::io::Result { let res = unsafe { libc::sysconf(arg) }; diff --git a/illumos-utils/src/opte/illumos.rs b/illumos-utils/src/opte/illumos.rs index 88e8d343b14..98fd2ae1999 100644 --- a/illumos-utils/src/opte/illumos.rs +++ b/illumos-utils/src/opte/illumos.rs @@ -4,9 +4,9 @@ //! Interactions with the Oxide Packet Transformation Engine (OPTE) -use crate::addrobj::AddrObject; use crate::dladm; use camino::Utf8Path; +use helios_fusion::addrobj::AddrObject; use omicron_common::api::internal::shared::NetworkInterfaceKind; use opte_ioctl::OpteHdl; use slog::info; @@ -33,7 +33,7 @@ pub enum Error { IncompatibleKernel, #[error(transparent)] - BadAddrObj(#[from] crate::addrobj::ParseError), + BadAddrObj(#[from] helios_fusion::addrobj::ParseError), #[error(transparent)] SetLinkpropError(#[from] crate::dladm::SetLinkpropError), diff --git a/illumos-utils/src/opte/non_illumos.rs b/illumos-utils/src/opte/non_illumos.rs index 1893c7f1aaa..0b9301adae8 100644 --- a/illumos-utils/src/opte/non_illumos.rs +++ b/illumos-utils/src/opte/non_illumos.rs @@ -6,7 +6,7 @@ use slog::Logger; -use crate::addrobj::AddrObject; +use helios_fusion::addrobj::AddrObject; use omicron_common::api::internal::shared::NetworkInterfaceKind; #[derive(thiserror::Error, Debug)] diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs index 1e91aaadb56..69589ad39e4 100644 --- a/illumos-utils/src/running_zone.rs +++ b/illumos-utils/src/running_zone.rs @@ -4,14 +4,14 @@ //! Utilities to manage running zones. -use crate::addrobj::AddrObject; use crate::dladm::Etherstub; use crate::link::{Link, VnicAllocator}; use crate::opte::{Port, PortTicket}; use crate::svc::wait_for_service; use crate::zone::{AddressRequest, Zones, IPADM, ZONE_PREFIX}; use camino::{Utf8Path, Utf8PathBuf}; -use helios_fusion::{BoxedExecutor, ExecutionError}; +use helios_fusion::addrobj::AddrObject; +use helios_fusion::{BoxedExecutor, ExecutionError, ROUTE}; use ipnetwork::IpNetwork; use omicron_common::backoff; use slog::{error, info, o, warn, Logger}; @@ -64,7 +64,7 @@ pub enum EnsureAddressError { AddrObject { request: AddressRequest, zone: String, - err: crate::addrobj::ParseError, + err: helios_fusion::addrobj::ParseError, }, #[error(transparent)] @@ -126,7 +126,7 @@ pub enum GetZoneError { AddrObject { name: String, #[source] - err: crate::addrobj::ParseError, + err: helios_fusion::addrobj::ParseError, }, #[error( @@ -680,13 +680,7 @@ impl RunningZone { "-ifp", port.vnic_name(), ])?; - self.run_cmd(&[ - "/usr/sbin/route", - "add", - "-inet", - "default", - &gateway_ip, - ])?; + self.run_cmd(&[ROUTE, "add", "-inet", "default", &gateway_ip])?; Ok(addr) } else { // If the port is using IPv6 addressing we still want it to use @@ -757,7 +751,7 @@ impl RunningZone { gateway: Ipv6Addr, ) -> Result<(), RunCommandError> { self.run_cmd([ - "/usr/sbin/route", + ROUTE, "add", "-inet6", "default", @@ -771,12 +765,7 @@ impl RunningZone { &self, gateway: Ipv4Addr, ) -> Result<(), RunCommandError> { - self.run_cmd([ - "/usr/sbin/route", - "add", - "default", - &gateway.to_string(), - ])?; + self.run_cmd([ROUTE, "add", "default", &gateway.to_string()])?; Ok(()) } @@ -787,7 +776,7 @@ impl RunningZone { zone_vnic_name: &str, ) -> Result<(), RunCommandError> { self.run_cmd([ - "/usr/sbin/route", + ROUTE, "add", "-inet6", &format!("{bootstrap_prefix:x}::/16"), diff --git a/illumos-utils/src/zfs.rs 
b/illumos-utils/src/zfs.rs index 980709597e7..6779ffab91b 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -9,6 +9,8 @@ use helios_fusion::{BoxedExecutor, ExecutionError, PFEXEC}; use omicron_common::disk::DiskIdentity; use std::fmt; +pub use helios_fusion::ZFS; + // These locations in the ramdisk must only be used by the switch zone. // // We need the switch zone online before we can create the U.2 drives and @@ -19,7 +21,6 @@ use std::fmt; pub const ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT: &str = "/zone"; pub const ZONE_ZFS_RAMDISK_DATASET: &str = "rpool/zone"; -pub const ZFS: &str = "/usr/sbin/zfs"; pub const KEYPATH_ROOT: &str = "/var/run/oxide/"; /// Error returned by [`Zfs::list_datasets`]. @@ -427,8 +428,14 @@ impl Zfs { name: &str, ) -> Result { let mut command = std::process::Command::new(PFEXEC); - let cmd = - command.args(&[ZFS, "get", "-Ho", "value", &name, filesystem_name]); + let cmd = command.args(&[ + ZFS, + "get", + "-Hpo", + "value", + &name, + filesystem_name, + ]); let output = executor.execute(cmd).map_err(|err| GetValueError { filesystem: filesystem_name.to_string(), name: name.to_string(), @@ -458,7 +465,7 @@ pub fn get_all_omicron_datasets_for_delete( // This includes cockroachdb, clickhouse, and crucible datasets. let zpools = crate::zpool::Zpool::list(executor)?; for pool in &zpools { - let internal = pool.kind() == crate::zpool::ZpoolKind::Internal; + let internal = pool.kind() == helios_fusion::zpool::ZpoolKind::Internal; let pool = pool.to_string(); for dataset in &Zfs::list_datasets(executor, &pool)? { // Avoid erasing crashdump datasets on internal pools diff --git a/illumos-utils/src/zone.rs b/illumos-utils/src/zone.rs index 209c403fd2d..98ec0daa106 100644 --- a/illumos-utils/src/zone.rs +++ b/illumos-utils/src/zone.rs @@ -4,26 +4,22 @@ //! API for interacting with Zones running Propolis. +use crate::dladm::{EtherstubVnic, VNIC_PREFIX_BOOTSTRAP, VNIC_PREFIX_CONTROL}; + use anyhow::anyhow; use camino::Utf8Path; +use helios_fusion::addrobj::AddrObject; +use helios_fusion::{BoxedExecutor, ExecutionError, PFEXEC}; use ipnetwork::IpNetwork; use ipnetwork::IpNetworkError; +use omicron_common::address::SLED_PREFIX; use slog::info; use slog::Logger; use std::net::{IpAddr, Ipv6Addr}; -use crate::addrobj::AddrObject; -use crate::dladm::{EtherstubVnic, VNIC_PREFIX_BOOTSTRAP, VNIC_PREFIX_CONTROL}; -use helios_fusion::{BoxedExecutor, ExecutionError, PFEXEC}; -use omicron_common::address::SLED_PREFIX; - -const DLADM: &str = "/usr/sbin/dladm"; -pub const IPADM: &str = "/usr/sbin/ipadm"; -pub const SVCADM: &str = "/usr/sbin/svcadm"; -pub const SVCCFG: &str = "/usr/sbin/svccfg"; -pub const ZLOGIN: &str = "/usr/sbin/zlogin"; -pub const ZONEADM: &str = "/usr/sbin/zoneadm"; -pub const ZONECFG: &str = "/usr/sbin/zonecfg"; +pub use helios_fusion::{ + DLADM, IPADM, SVCADM, SVCCFG, ZLOGIN, ZONEADM, ZONECFG, +}; // TODO: These could become enums pub const ZONE_PREFIX: &str = "oxz_"; @@ -35,7 +31,7 @@ enum Error { Execution(#[from] ExecutionError), #[error(transparent)] - AddrObject(#[from] crate::addrobj::ParseError), + AddrObject(#[from] helios_fusion::addrobj::ParseError), #[error("Address not found: {addrobj}")] AddressNotFound { addrobj: AddrObject }, diff --git a/illumos-utils/src/zpool.rs b/illumos-utils/src/zpool.rs index d96efc60d70..49a35c21328 100644 --- a/illumos-utils/src/zpool.rs +++ b/illumos-utils/src/zpool.rs @@ -4,21 +4,15 @@ //! Utilities for managing Zpools. 
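+//!
+//! The zpool name, kind, health, and info types are defined in
+//! `helios_fusion::zpool` and re-exported below for existing callers.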
-use camino::{Utf8Path, Utf8PathBuf}; +use camino::Utf8Path; +use helios_fusion::zpool::ParseError; use helios_fusion::{BoxedExecutor, ExecutionError, PFEXEC}; -use schemars::JsonSchema; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::str::FromStr; -use uuid::Uuid; -const ZPOOL_EXTERNAL_PREFIX: &str = "oxp_"; -const ZPOOL_INTERNAL_PREFIX: &str = "oxi_"; -pub const ZPOOL: &str = "/usr/sbin/zpool"; - -#[derive(thiserror::Error, Debug, PartialEq, Eq)] -#[error("Failed to parse output: {0}")] -pub struct ParseError(String); +pub use helios_fusion::zpool::ZpoolHealth; +pub use helios_fusion::zpool::ZpoolInfo; +pub use helios_fusion::zpool::ZpoolKind; +pub use helios_fusion::zpool::ZpoolName; +pub use helios_fusion::ZPOOL; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -54,114 +48,6 @@ pub struct GetInfoError { err: Error, } -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum ZpoolHealth { - /// The device is online and functioning. - Online, - /// One or more components are degraded or faulted, but sufficient - /// replicas exist to continue functioning. - Degraded, - /// One or more components are degraded or faulted, and insufficient - /// replicas exist to continue functioning. - Faulted, - /// The device was explicitly taken offline by "zpool offline". - Offline, - /// The device was physically removed. - Removed, - /// The device could not be opened. - Unavailable, -} - -impl FromStr for ZpoolHealth { - type Err = ParseError; - - fn from_str(s: &str) -> Result { - match s { - "ONLINE" => Ok(ZpoolHealth::Online), - "DEGRADED" => Ok(ZpoolHealth::Degraded), - "FAULTED" => Ok(ZpoolHealth::Faulted), - "OFFLINE" => Ok(ZpoolHealth::Offline), - "REMOVED" => Ok(ZpoolHealth::Removed), - "UNAVAIL" => Ok(ZpoolHealth::Unavailable), - _ => Err(ParseError(format!("Unrecognized zpool 'health': {}", s))), - } - } -} - -/// Describes a Zpool. -#[derive(Clone, Debug)] -pub struct ZpoolInfo { - name: String, - size: u64, - allocated: u64, - free: u64, - health: ZpoolHealth, -} - -impl ZpoolInfo { - pub fn name(&self) -> &str { - &self.name - } - - pub fn size(&self) -> u64 { - self.size - } - - #[allow(dead_code)] - pub fn allocated(&self) -> u64 { - self.allocated - } - - #[allow(dead_code)] - pub fn free(&self) -> u64 { - self.free - } - - #[allow(dead_code)] - pub fn health(&self) -> ZpoolHealth { - self.health - } -} - -impl FromStr for ZpoolInfo { - type Err = ParseError; - - fn from_str(s: &str) -> Result { - // Lambda helpers for error handling. - let expected_field = |name| { - ParseError(format!("Missing '{}' value in zpool list output", name)) - }; - let failed_to_parse = |name, err| { - ParseError(format!("Failed to parse field '{}': {}", name, err)) - }; - - let mut values = s.trim().split_whitespace(); - let name = - values.next().ok_or_else(|| expected_field("name"))?.to_string(); - let size = values - .next() - .ok_or_else(|| expected_field("size"))? - .parse::() - .map_err(|e| failed_to_parse("size", e))?; - let allocated = values - .next() - .ok_or_else(|| expected_field("allocated"))? - .parse::() - .map_err(|e| failed_to_parse("allocated", e))?; - let free = values - .next() - .ok_or_else(|| expected_field("free"))? - .parse::() - .map_err(|e| failed_to_parse("free", e))?; - let health = values - .next() - .ok_or_else(|| expected_field("health"))? - .parse::()?; - - Ok(ZpoolInfo { name, size, allocated, free, health }) - } -} - /// Wraps commands for interacting with ZFS pools. 
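+///
+/// (Note: these commands are issued through a `BoxedExecutor`, so they can be
+/// run against either the real host or a fake host's executor in tests.)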
pub struct Zpool {} @@ -261,7 +147,6 @@ impl Zpool { Ok(zpool) } - #[cfg_attr(test, allow(dead_code))] pub fn get_info( executor: &BoxedExecutor, name: &str, @@ -285,299 +170,3 @@ impl Zpool { Ok(zpool) } } - -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub enum ZpoolKind { - // This zpool is used for external storage (u.2) - External, - // This zpool is used for internal storage (m.2) - Internal, -} - -/// A wrapper around a zpool name. -/// -/// This expects that the format will be: `ox{i,p}_` - we parse the prefix -/// when reading the structure, and validate that the UUID can be utilized. -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct ZpoolName { - id: Uuid, - kind: ZpoolKind, -} - -const ZPOOL_NAME_REGEX: &str = r"^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"; - -/// Custom JsonSchema implementation to encode the constraints on Name. -impl JsonSchema for ZpoolName { - fn schema_name() -> String { - "ZpoolName".to_string() - } - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some(Box::new(schemars::schema::Metadata { - title: Some( - "The name of a Zpool".to_string(), - ), - description: Some( - "Zpool names are of the format ox{i,p}_. They are either \ - Internal or External, and should be unique" - .to_string(), - ), - ..Default::default() - })), - instance_type: Some(schemars::schema::InstanceType::String.into()), - string: Some(Box::new(schemars::schema::StringValidation { - pattern: Some(ZPOOL_NAME_REGEX.to_owned()), - ..Default::default() - })), - ..Default::default() - } - .into() - } -} - -impl ZpoolName { - pub fn new_internal(id: Uuid) -> Self { - Self { id, kind: ZpoolKind::Internal } - } - - pub fn new_external(id: Uuid) -> Self { - Self { id, kind: ZpoolKind::External } - } - - pub fn id(&self) -> Uuid { - self.id - } - - pub fn kind(&self) -> ZpoolKind { - self.kind - } - - /// Returns a path to a dataset's mountpoint within the zpool. 
- /// - /// For example: oxp_(UUID) -> /pool/ext/(UUID)/(dataset) - pub fn dataset_mountpoint(&self, dataset: &str) -> Utf8PathBuf { - let mut path = Utf8PathBuf::new(); - path.push("/pool"); - match self.kind { - ZpoolKind::External => path.push("ext"), - ZpoolKind::Internal => path.push("int"), - }; - path.push(self.id().to_string()); - path.push(dataset); - path - } -} - -impl<'de> Deserialize<'de> for ZpoolName { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - ZpoolName::from_str(&s).map_err(serde::de::Error::custom) - } -} - -impl Serialize for ZpoolName { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} - -impl FromStr for ZpoolName { - type Err = String; - - fn from_str(s: &str) -> Result { - if let Some(s) = s.strip_prefix(ZPOOL_EXTERNAL_PREFIX) { - let id = Uuid::from_str(s).map_err(|e| e.to_string())?; - Ok(ZpoolName::new_external(id)) - } else if let Some(s) = s.strip_prefix(ZPOOL_INTERNAL_PREFIX) { - let id = Uuid::from_str(s).map_err(|e| e.to_string())?; - Ok(ZpoolName::new_internal(id)) - } else { - Err(format!( - "Bad zpool name {s}; must start with '{ZPOOL_EXTERNAL_PREFIX}' or '{ZPOOL_INTERNAL_PREFIX}'", - )) - } - } -} - -impl fmt::Display for ZpoolName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let prefix = match self.kind { - ZpoolKind::External => ZPOOL_EXTERNAL_PREFIX, - ZpoolKind::Internal => ZPOOL_INTERNAL_PREFIX, - }; - write!(f, "{prefix}{}", self.id) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_zpool_name_regex() { - let valid = [ - "oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - ]; - - let invalid = [ - "", - // Whitespace - " oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b ", - // Case sensitivity - "oxp_D462A7F7-b628-40fe-80ff-4e4189e2d62b", - // Bad prefix - "ox_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxa_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxi-d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxp-d462a7f7-b628-40fe-80ff-4e4189e2d62b", - // Missing Prefix - "d462a7f7-b628-40fe-80ff-4e4189e2d62b", - // Bad UUIDs (Not following UUIDv4 format) - "oxi_d462a7f7-b628-30fe-80ff-4e4189e2d62b", - "oxi_d462a7f7-b628-40fe-c0ff-4e4189e2d62b", - ]; - - let r = regress::Regex::new(ZPOOL_NAME_REGEX) - .expect("validation regex is valid"); - for input in valid { - let m = r - .find(input) - .unwrap_or_else(|| panic!("input {input} did not match regex")); - assert_eq!(m.start(), 0, "input {input} did not match start"); - assert_eq!(m.end(), input.len(), "input {input} did not match end"); - } - - for input in invalid { - assert!( - r.find(input).is_none(), - "invalid input {input} should not match validation regex" - ); - } - } - - #[test] - fn test_parse_zpool_name_json() { - #[derive(Serialize, Deserialize, JsonSchema)] - struct TestDataset { - pool_name: ZpoolName, - } - - // Confirm that we can convert from a JSON string to a a ZpoolName - let json_string = - r#"{"pool_name":"oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b"}"#; - let dataset: TestDataset = serde_json::from_str(json_string) - .expect("Could not parse ZpoolName from Json Object"); - assert!(matches!(dataset.pool_name.kind, ZpoolKind::Internal)); - - // Confirm we can go the other way (ZpoolName to JSON string) too. 
- let j = serde_json::to_string(&dataset) - .expect("Cannot convert back to JSON string"); - assert_eq!(j, json_string); - } - - fn toml_string(s: &str) -> String { - format!("zpool_name = \"{}\"", s) - } - - fn parse_name(s: &str) -> Result { - toml_string(s) - .parse::() - .expect("Cannot parse as TOML value") - .get("zpool_name") - .expect("Missing key") - .clone() - .try_into::() - } - - #[test] - fn test_parse_external_zpool_name() { - let uuid: Uuid = - "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); - let good_name = format!("{}{}", ZPOOL_EXTERNAL_PREFIX, uuid); - - let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); - assert_eq!(uuid, name.id()); - assert_eq!(ZpoolKind::External, name.kind()); - } - - #[test] - fn test_parse_internal_zpool_name() { - let uuid: Uuid = - "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); - let good_name = format!("{}{}", ZPOOL_INTERNAL_PREFIX, uuid); - - let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); - assert_eq!(uuid, name.id()); - assert_eq!(ZpoolKind::Internal, name.kind()); - } - - #[test] - fn test_parse_bad_zpool_names() { - let bad_names = vec![ - // Nonsense string - "this string is GARBAGE", - // Missing prefix - "d462a7f7-b628-40fe-80ff-4e4189e2d62b", - // Underscores - "oxp_d462a7f7_b628_40fe_80ff_4e4189e2d62b", - ]; - - for bad_name in &bad_names { - assert!( - parse_name(&bad_name).is_err(), - "Parsing {} should fail", - bad_name - ); - } - } - - #[test] - fn test_parse_zpool() { - let name = "rpool"; - let size = 10000; - let allocated = 6000; - let free = 4000; - let health = "ONLINE"; - - // We should be able to tolerate any whitespace between columns. - let input = format!( - "{} {} {} \t\t\t {} {}", - name, size, allocated, free, health - ); - let output: ZpoolInfo = input.parse().unwrap(); - assert_eq!(output.name(), name); - assert_eq!(output.size(), size); - assert_eq!(output.allocated(), allocated); - assert_eq!(output.free(), free); - assert_eq!(output.health(), ZpoolHealth::Online); - } - - #[test] - fn test_parse_zpool_missing_column() { - let name = "rpool"; - let size = 10000; - let allocated = 6000; - let free = 4000; - let _health = "ONLINE"; - - // Similar to the prior test case, just omit "health". - let input = format!("{} {} {} {}", name, size, allocated, free); - let result: Result = input.parse(); - - let expected_err = ParseError( - "Missing 'health' value in zpool list output".to_owned(), - ); - assert_eq!(result.unwrap_err(), expected_err,); - } -} diff --git a/installinator/src/bootstrap.rs b/installinator/src/bootstrap.rs index 017e5d0b507..bb1f91dfba2 100644 --- a/installinator/src/bootstrap.rs +++ b/installinator/src/bootstrap.rs @@ -10,8 +10,8 @@ use anyhow::ensure; use anyhow::Context; use anyhow::Result; use ddm_admin_client::Client as DdmAdminClient; +use helios_fusion::addrobj::AddrObject; use helios_fusion::BoxedExecutor; -use illumos_utils::addrobj::AddrObject; use illumos_utils::dladm; use illumos_utils::dladm::Dladm; use illumos_utils::zone::Zones; diff --git a/sled-agent/src/bootstrap/maghemite.rs b/sled-agent/src/bootstrap/maghemite.rs index 1adc677b238..eee55da1917 100644 --- a/sled-agent/src/bootstrap/maghemite.rs +++ b/sled-agent/src/bootstrap/maghemite.rs @@ -4,7 +4,7 @@ //! Starting the mg-ddm service. 
-use illumos_utils::addrobj::AddrObject; +use helios_fusion::addrobj::AddrObject; use slog::Logger; use thiserror::Error; diff --git a/sled-agent/src/bootstrap/pre_server.rs b/sled-agent/src/bootstrap/pre_server.rs index 91a482c8553..c9c794b635e 100644 --- a/sled-agent/src/bootstrap/pre_server.rs +++ b/sled-agent/src/bootstrap/pre_server.rs @@ -22,9 +22,9 @@ use cancel_safe_futures::TryStreamExt; use ddm_admin_client::Client as DdmAdminClient; use futures::stream; use futures::StreamExt; +use helios_fusion::addrobj::AddrObject; use helios_fusion::BoxedExecutor; use helios_protostar::HostExecutor; -use illumos_utils::addrobj::AddrObject; use illumos_utils::dladm; use illumos_utils::dladm::Dladm; use illumos_utils::zfs; diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index e74a3d7f18d..2f94713bde3 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -46,9 +46,9 @@ use camino::{Utf8Path, Utf8PathBuf}; use ddm_admin_client::{Client as DdmAdminClient, DdmError}; use dpd_client::{types as DpdTypes, Client as DpdClient, Error as DpdError}; use dropshot::HandlerTaskMode; +use helios_fusion::addrobj::AddrObject; +use helios_fusion::addrobj::IPV6_LINK_LOCAL_NAME; use helios_fusion::{BoxedExecutor, PFEXEC}; -use illumos_utils::addrobj::AddrObject; -use illumos_utils::addrobj::IPV6_LINK_LOCAL_NAME; use illumos_utils::dladm::{ Dladm, Etherstub, EtherstubVnic, GetSimnetError, PhysicalLink, }; diff --git a/sled-hardware/src/underlay.rs b/sled-hardware/src/underlay.rs index ccaf277adb7..b967ed0a888 100644 --- a/sled-hardware/src/underlay.rs +++ b/sled-hardware/src/underlay.rs @@ -5,9 +5,9 @@ //! Finding the underlay network physical links and address objects. use crate::is_gimlet; +use helios_fusion::addrobj; +use helios_fusion::addrobj::AddrObject; use helios_fusion::BoxedExecutor; -use illumos_utils::addrobj; -use illumos_utils::addrobj::AddrObject; use illumos_utils::dladm; use illumos_utils::dladm::Dladm; use illumos_utils::dladm::FindPhysicalLinkError;
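A minimal sketch of how crate-internal code might exercise the new `Zpools` bookkeeping from helios/tokamak/src/host/zpools.rs above. This assumes `ZpoolName::new_external` and `ZpoolHealth::Degraded` keep their existing APIs after moving into `helios_fusion::zpool`; the module path, function name, and vdev path are hypothetical.

// Sketch only: Zpools and FakeZpool are pub(crate), so this would live inside
// the tokamak crate.
use camino::Utf8PathBuf;
use helios_fusion::zpool::{ZpoolHealth, ZpoolName};
use uuid::Uuid;

use crate::host::zpools::Zpools; // path assumed from the module layout above

fn zpools_example(zpools: &mut Zpools) -> Result<(), String> {
    let name = ZpoolName::new_external(Uuid::new_v4());
    let vdev = Utf8PathBuf::from("/fake/vdev0");

    // Register an imported pool; inserting the same name twice is an error.
    zpools.insert(name.clone(), vdev.clone(), true)?;
    assert!(zpools.insert(name.clone(), vdev, true).is_err());

    // Look up the pool and tweak its emulated state.
    let pool = zpools.get_mut(&name).expect("pool was just inserted");
    pool.health = ZpoolHealth::Degraded;

    // All known pools can be iterated.
    assert_eq!(zpools.all().count(), 1);
    Ok(())
}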