From af09cdd3b292df01c6804b7445c9cd1beb2ccf9f Mon Sep 17 00:00:00 2001 From: Alan Hanson Date: Tue, 23 Sep 2025 00:30:10 +0000 Subject: [PATCH 01/18] Update Propolis and Crucible Crucible changes are: update to latest `vergen` (#1770) Update rand dependencies, and fallout from that. (#1764) [crucible-downstairs] migrate to API traits (#1768) [crucible-agent] migrate to API trait (#1766) [crucible-pantry] migrate to API trait (#1767) Add back job delays in the downstairs with the --lossy flag (#1761) Propolis changes are: Crucible update plus a few other dependency changes. (#948) [2/n] [propolis-server] switch to API trait (#946) [1/n] add a temporary indent to propolis server APIs (#945) Handle Intel CPUID leaves 4 and 18h, specialize CPUID for VM shape (#941) Increase viona receive queue length to 2048 (#935) Expand viona header pad to account for options (#937) fix linux p9fs multi message reads (#932) add a D script to report VMs' CPUID queries (#934) Update GH actions Re-enable viona packet data loaning --- Cargo.lock | 73 ++++++++++++++++++++++--------------------- Cargo.toml | 16 +++++----- package-manifest.toml | 16 +++++----- 3 files changed, 54 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1860f7bf299..e09dce5e60b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -692,7 +692,7 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a#23b06c2f452a97fac1dc12561d8451ce876d7c5a" +source = "git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a#827e6615bfebfd94d41504dcd1517a0f22e3166a" dependencies = [ "bhyve_api_sys", "libc", @@ -702,7 +702,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a#23b06c2f452a97fac1dc12561d8451ce876d7c5a" +source = "git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a#827e6615bfebfd94d41504dcd1517a0f22e3166a" dependencies = [ "libc", "strum 0.26.3", @@ -1145,16 +1145,16 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.18.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform 0.1.8", "semver 1.0.26", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.16", ] [[package]] @@ -2060,7 +2060,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=779775d5130ff7a4836f52f48b7e64d1479ee104#779775d5130ff7a4836f52f48b7e64d1479ee104" +source = "git+https://github.com/oxidecomputer/crucible?rev=65ca41e821ef53ec9c28909357f23e3348169e4f#65ca41e821ef53ec9c28909357f23e3348169e4f" dependencies = [ "anyhow", "chrono", @@ -2076,7 +2076,7 @@ dependencies = [ [[package]] name = "crucible-client-types" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=779775d5130ff7a4836f52f48b7e64d1479ee104#779775d5130ff7a4836f52f48b7e64d1479ee104" +source = "git+https://github.com/oxidecomputer/crucible?rev=65ca41e821ef53ec9c28909357f23e3348169e4f#65ca41e821ef53ec9c28909357f23e3348169e4f" dependencies = [ "base64 0.22.1", "crucible-workspace-hack", @@ -2089,7 +2089,7 @@ dependencies = [ 
[[package]] name = "crucible-common" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=779775d5130ff7a4836f52f48b7e64d1479ee104#779775d5130ff7a4836f52f48b7e64d1479ee104" +source = "git+https://github.com/oxidecomputer/crucible?rev=65ca41e821ef53ec9c28909357f23e3348169e4f#65ca41e821ef53ec9c28909357f23e3348169e4f" dependencies = [ "anyhow", "atty", @@ -2112,13 +2112,14 @@ dependencies = [ "toml 0.8.23", "twox-hash", "uuid", - "vergen 8.3.2", + "vergen", + "vergen-git2", ] [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=779775d5130ff7a4836f52f48b7e64d1479ee104#779775d5130ff7a4836f52f48b7e64d1479ee104" +source = "git+https://github.com/oxidecomputer/crucible?rev=65ca41e821ef53ec9c28909357f23e3348169e4f#65ca41e821ef53ec9c28909357f23e3348169e4f" dependencies = [ "anyhow", "chrono", @@ -2135,7 +2136,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=779775d5130ff7a4836f52f48b7e64d1479ee104#779775d5130ff7a4836f52f48b7e64d1479ee104" +source = "git+https://github.com/oxidecomputer/crucible?rev=65ca41e821ef53ec9c28909357f23e3348169e4f#65ca41e821ef53ec9c28909357f23e3348169e4f" dependencies = [ "crucible-workspace-hack", "libc", @@ -3915,9 +3916,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git2" -version = "0.19.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" +checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" dependencies = [ "bitflags 2.9.1", "libc", @@ -5634,9 +5635,9 @@ dependencies = [ [[package]] name = "libgit2-sys" -version = "0.17.0+1.8.1" +version = "0.18.2+1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10472326a8a6477c3c20a64547b0059e4b0d086869eee31e6d7da728a8eb7224" +checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" dependencies = [ "cc", "libc", @@ -7999,7 +8000,7 @@ dependencies = [ "pq-sys", "pretty_assertions", "progenitor-client 0.10.0", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a)", "qorb", "rand 0.9.2", "range-requests", @@ -8426,7 +8427,7 @@ dependencies = [ "oximeter-producer", "oxnet", "pretty_assertions", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a)", "propolis-mock-server", "propolis_api_types", "rand 0.9.2", @@ -10561,7 +10562,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a#23b06c2f452a97fac1dc12561d8451ce876d7c5a" +source = "git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a#827e6615bfebfd94d41504dcd1517a0f22e3166a" dependencies = [ "async-trait", "base64 0.21.7", @@ -10570,7 +10571,7 @@ dependencies = [ "progenitor 0.10.0", "progenitor-client 0.10.0", "propolis_api_types", - "rand 0.8.5", + "rand 0.9.2", "reqwest", "schemars", "serde", @@ -10606,7 +10607,7 @@ dependencies = [ 
[[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a#23b06c2f452a97fac1dc12561d8451ce876d7c5a" +source = "git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a#827e6615bfebfd94d41504dcd1517a0f22e3166a" dependencies = [ "anyhow", "atty", @@ -10618,7 +10619,7 @@ dependencies = [ "progenitor 0.10.0", "propolis_api_types", "propolis_types", - "rand 0.8.5", + "rand 0.9.2", "reqwest", "schemars", "semver 1.0.26", @@ -10650,7 +10651,7 @@ dependencies = [ [[package]] name = "propolis_api_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a#23b06c2f452a97fac1dc12561d8451ce876d7c5a" +source = "git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a#827e6615bfebfd94d41504dcd1517a0f22e3166a" dependencies = [ "crucible-client-types", "propolis_types", @@ -10663,7 +10664,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a#23b06c2f452a97fac1dc12561d8451ce876d7c5a" +source = "git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a#827e6615bfebfd94d41504dcd1517a0f22e3166a" dependencies = [ "schemars", "serde", @@ -12454,7 +12455,7 @@ dependencies = [ "omicron-workspace-hack", "oxnet", "progenitor 0.10.0", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a)", "regress", "reqwest", "schemars", @@ -12532,7 +12533,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oxnet", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=23b06c2f452a97fac1dc12561d8451ce876d7c5a)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=827e6615bfebfd94d41504dcd1517a0f22e3166a)", "rcgen", "schemars", "serde", @@ -15105,29 +15106,31 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" -version = "8.3.2" +version = "9.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" +checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" dependencies = [ "anyhow", - "cargo_metadata 0.18.1", - "cfg-if", - "git2", + "cargo_metadata 0.19.2", + "derive_builder", "regex", "rustc_version 0.4.1", "rustversion", - "time", + "vergen-lib", ] [[package]] -name = "vergen" -version = "9.0.6" +name = "vergen-git2" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" +checksum = "4f6ee511ec45098eabade8a0750e76eec671e7fb2d9360c563911336bea9cac1" dependencies = [ "anyhow", "derive_builder", + "git2", "rustversion", + "time", + "vergen", "vergen-lib", ] @@ -15141,7 +15144,7 @@ dependencies = [ "derive_builder", "rustversion", "time", - "vergen 9.0.6", + "vergen", "vergen-lib", ] diff --git a/Cargo.toml b/Cargo.toml index 5c17d3ab786..91761edaa78 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -409,10 +409,10 @@ crossterm = { version = "0.29.0", features = ["event-stream"] } # NOTE: if you change the pinned revision of the 
`crucible` dependencies, you # must also update the references in package-manifest.toml to match the new # revision. -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "779775d5130ff7a4836f52f48b7e64d1479ee104" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "779775d5130ff7a4836f52f48b7e64d1479ee104" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "779775d5130ff7a4836f52f48b7e64d1479ee104" } -crucible-common = { git = "https://github.com/oxidecomputer/crucible", rev = "779775d5130ff7a4836f52f48b7e64d1479ee104" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "65ca41e821ef53ec9c28909357f23e3348169e4f" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "65ca41e821ef53ec9c28909357f23e3348169e4f" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "65ca41e821ef53ec9c28909357f23e3348169e4f" } +crucible-common = { git = "https://github.com/oxidecomputer/crucible", rev = "65ca41e821ef53ec9c28909357f23e3348169e4f" } # NOTE: See above! csv = "1.3.1" curve25519-dalek = "4" @@ -637,10 +637,10 @@ progenitor-client = "0.10.0" # NOTE: if you change the pinned revision of the `bhyve_api` and propolis # dependencies, you must also update the references in package-manifest.toml to # match the new revision. -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "23b06c2f452a97fac1dc12561d8451ce876d7c5a" } -propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev = "23b06c2f452a97fac1dc12561d8451ce876d7c5a" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "23b06c2f452a97fac1dc12561d8451ce876d7c5a" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "23b06c2f452a97fac1dc12561d8451ce876d7c5a" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "827e6615bfebfd94d41504dcd1517a0f22e3166a" } +propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev = "827e6615bfebfd94d41504dcd1517a0f22e3166a" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "827e6615bfebfd94d41504dcd1517a0f22e3166a" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "827e6615bfebfd94d41504dcd1517a0f22e3166a" } # NOTE: see above! proptest = "1.7.0" qorb = "0.4.1" diff --git a/package-manifest.toml b/package-manifest.toml index 911f70ae33c..0297cba3e4e 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -594,10 +594,10 @@ only_for_targets.image = "standard" # 3. 
Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "779775d5130ff7a4836f52f48b7e64d1479ee104" +source.commit = "65ca41e821ef53ec9c28909357f23e3348169e4f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "1b393ceb5685700e26da0b6b71bcb869e7456bd33bbfdf4afe2ea5899e77a1fa" +source.sha256 = "7a19eda420ebd1126a25746c2198ed58a62647c755a375c746e84351e651b278" output.type = "zone" output.intermediate_only = true @@ -606,10 +606,10 @@ service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "779775d5130ff7a4836f52f48b7e64d1479ee104" +source.commit = "65ca41e821ef53ec9c28909357f23e3348169e4f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "440da1e2c63c331bfa9e694bcc452c3bb55a915151eac022e7d810e99761aead" +source.sha256 = "e5dcf53aac3ddb5060663d2950837b3c4c81c68ede23b4ff5b1778cd1d4fb51e" output.type = "zone" output.intermediate_only = true @@ -623,10 +623,10 @@ service_name = "crucible_dtrace" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "779775d5130ff7a4836f52f48b7e64d1479ee104" +source.commit = "65ca41e821ef53ec9c28909357f23e3348169e4f" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-dtrace.sha256.txt -source.sha256 = "a3d429399d30b2f26f8194ebfa47df1191c684c4cf8cfbb3be18fa79556d3d10" +source.sha256 = "4ec3f612b0c10ef3372e22e99ef8170ab39d198f394b0e51d6c1065dc7d01b40" output.type = "tarball" # Refer to @@ -637,10 +637,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "23b06c2f452a97fac1dc12561d8451ce876d7c5a" +source.commit = "827e6615bfebfd94d41504dcd1517a0f22e3166a" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "ef5800987f38818f98942a1a6a3864858f81a258ca71cb5f908742a87d7a98e4" +source.sha256 = "242ecfab2c992a4587d19c19a1ad36338305a15af418233dcde231c614ccc2bc" output.type = "zone" [package.mg-ddm-gz] From d272063a56fa94dcb51d35ae143bbec9b1a0faac Mon Sep 17 00:00:00 2001 From: Alan Hanson Date: Tue, 23 Sep 2025 01:28:16 +0000 Subject: [PATCH 02/18] cargo hakari --- Cargo.lock | 2 ++ workspace-hack/Cargo.toml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index e09dce5e60b..4514b5d78af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8677,6 +8677,8 @@ dependencies = [ "usdt", "usdt-impl", "uuid", + "vergen", + "vergen-lib", "winnow 0.6.26", "winnow 0.7.10", "x509-cert", diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 53d9b812ff0..d7a9ea41251 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -280,6 +280,8 @@ url = { version = "2.5.4", features = ["serde"] } usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.18.1", features = ["serde", "v4"] } +vergen = { version = "9.0.6", features = ["cargo", "rustc"] } +vergen-lib = { version = "0.1.6", features = ["cargo", "git", "rustc"] } 
winnow-ca01ad9e24f5d932 = { package = "winnow", version = "0.7.10" } x509-cert = { version = "0.2.5" } zerocopy-c38e5c1d305a1b54 = { package = "zerocopy", version = "0.8.26", default-features = false, features = ["derive", "simd"] } From 628c84d1176bb3a8434cdc5a0c0866ec1c6afcaf Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 22 Sep 2025 19:12:14 -0700 Subject: [PATCH 03/18] (7/N) Use nexus_generation, update it (#8936) - Actually update nexus generation within the top-level blueprint and Nexus zones - Deploy new and old nexus zones concurrently # Blueprint Planner - Automatically determine nexus generation when provisioning new Nexus zones, based on existing deployed zones - Update the logic for provisioning nexus zones, to deploy old and new nexus images side-by-side - Update the logic for expunging nexus zones, to only do so when running from a "newer" nexus - Add a planning stage to bump the top-level "nexus generation", if appropriate, which would trigger the old Nexuses to quiesce. Fixes https://github.com/oxidecomputer/omicron/issues/8843, https://github.com/oxidecomputer/omicron/issues/8854 --- .../tests/input/cmds-target-release.txt | 31 +- .../output/cmds-mupdate-update-flow-stdout | 18 +- .../output/cmds-noop-image-source-stdout | 2 + .../tests/output/cmds-target-release-stdout | 342 ++- .../planning/src/blueprint_builder/builder.rs | 31 +- nexus/reconfigurator/planning/src/example.rs | 17 + nexus/reconfigurator/planning/src/planner.rs | 1915 ++++++++++++++--- nexus/types/src/deployment.rs | 33 + nexus/types/src/deployment/planning_report.rs | 152 ++ openapi/nexus-internal.json | 158 +- 10 files changed, 2326 insertions(+), 373 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt index 98dbc5c8115..8c10f204883 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt @@ -408,10 +408,29 @@ inventory-generate blueprint-plan latest latest blueprint-diff latest -# The previous step updated the last non-Nexus zone on the final sled. We should -# now see a blueprint where every in-service zone (other than Nexus) has an -# image source set to an artifact from our TUF repo. -blueprint-show latest +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate + +# Add Nexus zones on three sleds +blueprint-plan latest latest +blueprint-diff latest + +# Propagate configs to the sleds which should be running Nexus +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate -# We ought to update the inventory for the final sled and then step through -# the Nexus handoff process, but that work is still in progress. +# Update the Nexus generation from 1 -> 2, initiating +# quiesce of the old Nexuses +blueprint-plan latest latest +blueprint-diff latest + +# Expunge three Nexus zones, one at a time +blueprint-plan latest latest +blueprint-plan latest latest +blueprint-plan latest latest + +# Attempt to plan one more blueprint. +# There should be no changes attempted here. 
+blueprint-plan latest latest diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index c01d0cb859f..6f14309436f 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -511,6 +511,7 @@ planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -786,6 +787,7 @@ planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -914,6 +916,7 @@ planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1101,6 +1104,7 @@ planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: - current target release generation (3) is lower than minimum required by blueprint (4) - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1278,6 +1282,7 @@ planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: - current target release generation (3) is lower than minimum required by blueprint (4) - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1420,6 +1425,7 @@ planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: * zone adds and updates are blocked: - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1597,6 +1603,7 @@ planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: * zone adds and updates are blocked: - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1714,7 +1721,9 @@ planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) +* only placed 0/2 desired nexus zones * zone updates waiting on pending 
MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1896,7 +1905,9 @@ planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) +* only placed 0/2 desired nexus zones * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2014,7 +2025,7 @@ INFO skipping board for MGS-driven update, serial_number: serial0, part_number: INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }] +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: 
ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 75b220ba-a0f4-4872-8202-dc7c87f062d0 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ea5b4030-b52f-44b2-8d70-45f15f987d01 (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f55647d4-5500-4ad3-893a-df45bd50d622 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: 
ArtifactHash("866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f") }, inv_image_source: InstallDataset } }] generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 blueprint source: planner with report: planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: @@ -2023,6 +2034,8 @@ planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: * noop converting host phase 2 slot B to Artifact on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * noop converting host phase 2 slot B to Artifact on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * noop converting host phase 2 slot B to Artifact on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 +* only placed 0/2 desired nexus zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2262,6 +2275,7 @@ planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: - current target release generation (4) is lower than minimum required by blueprint (5) - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2393,7 +2407,9 @@ planner config: * adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * discretionary zone placement waiting for NTP zones on sleds: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b * missing NTP zone on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b +* only placed 0/1 desired nexus zones * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index 1119aedabc7..ab1d658da34 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -189,6 +189,7 @@ planning report for blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4: * zone adds and updates are blocked: - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -429,6 +430,7 @@ planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: * zone adds and updates are blocked: - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 * zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index e3f9c3539cb..30a7e7e967d 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -226,6 +226,7 @@ planning report for blueprint 
8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -286,6 +287,7 @@ planning report for blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4: * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -348,6 +350,7 @@ planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: * 1 pending MGS update: * model0:serial0: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -419,6 +422,7 @@ planning report for blueprint df06bb57-ad42-4431-9206-abff322896c7: * 1 pending MGS update: * model0:serial0: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -491,6 +495,7 @@ planning report for blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -612,6 +617,7 @@ planning report for blueprint 9034c710-3e57-45f3-99e5-4316145e87ac: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -673,6 +679,7 @@ planning report for blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -733,6 +740,7 @@ planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -795,6 +803,7 @@ planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: * 1 pending MGS update: * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -868,6 +877,7 @@ planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: * 1 pending MGS update: * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -936,6 +946,7 @@ planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: * 1 pending MGS update: * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1008,6 +1019,7 @@ planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: * 1 pending MGS update: * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1076,6 +1088,7 @@ planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: * 1 pending MGS update: * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1147,6 +1160,7 @@ planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: * 1 pending MGS update: * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1216,6 +1230,7 @@ planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: * 1 pending MGS update: * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1352,6 +1367,7 @@ planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: * 1 pending MGS update: * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1426,6 +1442,7 @@ planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: * 1 pending MGS update: * model2:serial2: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1493,6 +1510,7 @@ planning report for blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35: * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1564,6 +1582,7 @@ planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1637,6 +1656,7 @@ planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: Some(B), expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1707,6 +1727,7 @@ planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: Some(B) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1775,6 +1796,7 @@ planning report for blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2: * 1 pending MGS update: * model2:serial2: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1842,6 +1864,7 @@ planning report for blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6: * 1 pending MGS update: * model2:serial2: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1975,6 +1998,7 @@ planning report for blueprint e54a0836-53e1-4948-a3af-0b77165289b5: * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (clickhouse) * 25 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2105,6 +2129,7 @@ planning report for blueprint 459a45a5-616e-421f-873b-2fb08c36205c: * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 62620961-fc4a-481e-968b-f5acbac0dc63 (internal_ntp) * 24 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2234,13 +2259,14 @@ INFO skipping board for MGS-driven update, serial_number: serial0, part_number: INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: f83ade6d-9ab1-4679-813b-b9457e039c0b (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: f83ade6d-9ab1-4679-813b-b9457e039c0b (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] generated blueprint b2295597-5788-482e-acf9-1731ec63fbd2 based on parent blueprint 459a45a5-616e-421f-873b-2fb08c36205c blueprint source: planner with report: planning report for blueprint b2295597-5788-482e-acf9-1731ec63fbd2: * waiting for NTP zones to appear in inventory on sleds: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * missing NTP zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2371,6 +2397,7 @@ planning report for blueprint 6fad8fd4-e825-433f-b76d-495484e068ce: * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (external_dns) * 23 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2517,6 +2544,7 @@ planning report for blueprint 24b6e243-100c-428d-8ea6-35b504226f55: * discretionary zones placed: * external_dns zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2664,6 +2692,7 @@ planning report for blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce: * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 86a22a56-0168-453d-9df1-cb2a7c64b5d3 (crucible) * 22 remaining out-of-date zones +* waiting to update top-level nexus_generation: some 
non-Nexus zone are not yet updated @@ -2789,6 +2818,7 @@ planning report for blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312: * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 99e2f30b-3174-40bf-a78a-90da8abba8ca (internal_dns) * 21 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -2938,6 +2968,7 @@ planning report for blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d: * discretionary zones placed: * internal_dns zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3088,6 +3119,7 @@ planning report for blueprint e2125c83-b255-45c9-bc9b-802cff09a812: * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (crucible_pantry) * 20 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3225,6 +3257,7 @@ planning report for blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3: * discretionary zones placed: * crucible_pantry zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3363,6 +3396,7 @@ planning report for blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c: * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone bd354eef-d8a6-4165-9124-283fb5e46d77 (crucible) * 19 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3493,6 +3527,7 @@ planning report for blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1: * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone e2fdefe7-95b2-4fd2-ae37-56929a06d58c (crucible) * 18 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3626,6 +3661,7 @@ planning report for blueprint e2deb7c0-2262-49fe-855f-4250c22afb36: * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 058fd5f9-60a8-4e11-9302-15172782e17d (crucible) * 17 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3744,6 +3780,7 @@ planning report for blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf: * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 427ec88f-f467-42fa-9bbb-66a91a36103c (internal_dns) * 16 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3882,6 +3919,7 @@ planning report for blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d: * discretionary zones placed: * internal_dns zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4021,6 +4059,7 @@ planning report for blueprint 60b55d33-5fec-4277-9864-935197eaead7: * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 5199c033-4cf9-4ab6-8ae7-566bd7606363 (crucible) * 15 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4141,6 
+4180,7 @@ planning report for blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544: * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 6444f8a5-6465-4f0b-a549-1993c113569c (internal_ntp) * 14 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4264,13 +4304,14 @@ INFO skipping board for MGS-driven update, serial_number: serial0, part_number: INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: cc6fdaf4-0195-4cef-950d-7bacd7059787 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: cc6fdaf4-0195-4cef-950d-7bacd7059787 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] generated blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 based on parent blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 blueprint source: planner with report: planning report for blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111: * waiting for NTP zones to appear in inventory on sleds: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * missing NTP zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4401,6 +4442,7 @@ planning report for blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a: * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 803bfb63-c246-41db-b0da-d3b87ddfc63d (external_dns) * 13 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4544,6 +4586,7 @@ planning report for blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110: * discretionary zones placed: * external_dns zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4688,6 +4731,7 @@ planning report for blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482: * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (crucible_pantry) * 12 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4822,6 +4866,7 @@ planning report for blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec: * discretionary zones placed: * crucible_pantry zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4957,6 +5002,7 @@ planning report for blueprint 
a2c6496d-98fc-444d-aa36-99508aa72367: * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone dfac80b4-a887-430a-ae87-a4e065dba787 (crucible) * 11 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5087,6 +5133,7 @@ planning report for blueprint 6ed56354-5941-40d1-a06c-b0e940701d52: * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 694bd14f-cb24-4be4-bb19-876e79cda2c8 (crucible) * 10 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5205,6 +5252,7 @@ planning report for blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2: * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (crucible_pantry) * 9 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5331,6 +5379,7 @@ planning report for blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152: * discretionary zones placed: * crucible_pantry zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5458,6 +5507,7 @@ planning report for blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88: * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 7c252b64-c5af-4ec1-989e-9a03f3b0f111 (crucible) * 8 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5577,6 +5627,7 @@ planning report for blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4: * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (internal_dns) * 7 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5714,6 +5765,7 @@ planning report for blueprint 59630e63-c953-4807-9e84-9e750a79f68e: * discretionary zones placed: * internal_dns zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5852,6 +5904,7 @@ planning report for blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194: * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (internal_ntp) * 6 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5977,13 +6030,14 @@ INFO skipping board for MGS-driven update, serial_number: serial0, part_number: INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: d5fd048a-8786-42d3-938e-820eae95d7f4 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +INFO some zones not yet up-to-date, zones_currently_updating: 
[ZonePropagationIncomplete { zone_id: d5fd048a-8786-42d3-938e-820eae95d7f4 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] generated blueprint 90650737-8142-47a6-9a48-a10efc487e57 based on parent blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 blueprint source: planner with report: planning report for blueprint 90650737-8142-47a6-9a48-a10efc487e57: * waiting for NTP zones to appear in inventory on sleds: d81c6a84-79b8-4958-ae41-ea46c9b19763 * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: d81c6a84-79b8-4958-ae41-ea46c9b19763 * missing NTP zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6116,6 +6170,7 @@ planning report for blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa: * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f55647d4-5500-4ad3-893a-df45bd50d622 (crucible) * 5 remaining out-of-date zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6240,6 +6295,7 @@ planning report for blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece: * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (external_dns) * 4 remaining out-of-date zones +* waiting to update top-level nexus_generation: new Nexus zones have not been planned yet @@ -6382,6 +6438,7 @@ planning report for blueprint 810ea95a-4730-43dd-867e-1984aeb9d873: * discretionary zones placed: * external_dns zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: new Nexus zones have not been planned yet @@ -6502,14 +6559,44 @@ external DNS: -> # The previous step updated the last non-Nexus zone on the final sled. We should -> # now see a blueprint where every in-service zone (other than Nexus) has an -> # image source set to an artifact from our TUF repo. 
-> blueprint-show latest -blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 -parent: e8b088a8-7da0-480b-a2dc-75ffef068ece +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (810ea95a-4730-43dd-867e-1984aeb9d873) + +> inventory-generate +generated inventory collection 78486156-ea1a-42a2-adc3-658ccd94ccd1 from configured sleds + + +> # Add Nexus zones on three sleds +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d based on parent blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 +blueprint source: planner with report: +planning report for blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d: +* discretionary zones placed: + * nexus zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 + * nexus zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 + * nexus zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 +* zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: new Nexus zones do not have database records yet + - sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 15) + +> blueprint-diff latest +from: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 +to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 15 -> 16): host phase 2 contents: ------------------------------ @@ -6538,8 +6625,8 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off - 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off @@ -6559,6 +6646,7 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 ++ oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_c11077d3-926e-479e-9f4e-299f6cf3bf29 5bb9ab35-13d4-4341-9548-855ce40a1269 in service none none off omicron zones: @@ -6578,10 +6666,10 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 ++ nexus c11077d3-926e-479e-9f4e-299f6cf3bf29 artifact: version 1.0.0 in service fd00:1122:3344:102::2c - - sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 14) + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 14 -> 15): host phase 2 contents: ------------------------------ @@ -6607,10 +6695,10 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off @@ -6629,6 +6717,7 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 ++ oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_923d968a-7d98-429b-8026-656d2e72af6a bc7fdd15-1a0a-4d95-a03a-112c027a5aca in service none none off omicron zones: @@ -6647,10 +6736,10 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 ++ nexus 923d968a-7d98-429b-8026-656d2e72af6a artifact: version 1.0.0 in service fd00:1122:3344:101::2b - - sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 14) + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 14 -> 15): host phase 2 contents: ------------------------------ @@ -6676,10 +6765,10 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 850d04b8-c706-46e9-b405-a7a800b744b5 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 850d04b8-c706-46e9-b405-a7a800b744b5 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off @@ -6698,6 +6787,7 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 ++ oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_3e2f206f-61d2-4dee-91e6-8fbcaf251df6 d51d6234-591b-49ee-9115-a44eec86d42d in service none none off omicron zones: @@ -6716,36 +6806,214 @@ parent: e8b088a8-7da0-480b-a2dc-75ffef068ece internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 
install dataset expunged ✓ fd00:1122:3344:103::21 nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 ++ nexus 3e2f206f-61d2-4dee-91e6-8fbcaf251df6 artifact: version 1.0.0 in service fd00:1122:3344:103::2b COCKROACHDB SETTINGS: - state fingerprint::::::::::::::::: (none) - cluster.preserve_downgrade_option: (do not modify) + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: - generation: 1 - read from:: SingleNode + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": ++ name: 3e2f206f-61d2-4dee-91e6-8fbcaf251df6.host (records: 1) ++ AAAA fd00:1122:3344:103::2b ++ name: 923d968a-7d98-429b-8026-656d2e72af6a.host (records: 1) ++ AAAA fd00:1122:3344:101::2b +* name: _nexus._tcp (records: 3 -> 6) +- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal ++ SRV port 12221 3e2f206f-61d2-4dee-91e6-8fbcaf251df6.host.control-plane.oxide.internal ++ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 923d968a-7d98-429b-8026-656d2e72af6a.host.control-plane.oxide.internal ++ SRV port 12221 c11077d3-926e-479e-9f4e-299f6cf3bf29.host.control-plane.oxide.internal ++ name: c11077d3-926e-479e-9f4e-299f6cf3bf29.host (records: 1) ++ AAAA fd00:1122:3344:102::2c + unchanged names: 50 (records: 62) + +external DNS: +* DNS zone: "oxide.example": +* name: example-silo.sys (records: 3 -> 6) +- A 192.0.2.2 +- A 192.0.2.3 +- A 192.0.2.4 ++ A 192.0.2.2 ++ A 192.0.2.7 ++ A 192.0.2.3 ++ A 192.0.2.6 ++ A 192.0.2.5 ++ A 192.0.2.4 + unchanged names: 4 (records: 6) + + + + +> # Propagate configs to the sleds which should be running Nexus +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (67c074ef-c52e-4ff1-851a-598c04dc2c8d) + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (67c074ef-c52e-4ff1-851a-598c04dc2c8d) + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (67c074ef-c52e-4ff1-851a-598c04dc2c8d) + +> inventory-generate +generated inventory collection 0f80a0f2-360f-4c64-9c35-3dc067dd2620 from configured sleds + + +> # Update the Nexus generation from 1 -> 2, initiating +> # quiesce of the old Nexuses +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 9, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e based on parent blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d +blueprint source: planner with report: +planning report for blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e: +* 3 remaining out-of-date zones +* 3 zones waiting to be expunged: + * zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus): image out-of-date, but zone's nexus_generation 1 is still active + * zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus): image out-of-date, but zone's nexus_generation 1 is still active + * zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus): image out-of-date, but zone's nexus_generation 1 is still active +* updating top-level nexus_generation to: 2 + + + +> blueprint-diff latest +from: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d +to: blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) METADATA: - created by::::::::::::: reconfigurator-sim - created at::::::::::::: - comment:::::::::::::::: (none) - internal DNS version::: 1 - external DNS version::: 1 - target release min gen: 1 - nexus gen:::::::::::::: 1 + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) +* nexus gen:::::::::::::: 1 -> 2 + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) - PENDING MGS-MANAGED UPDATES: 0 +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 54 (records: 71) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 12) + + + + +> # Expunge three Nexus zones, one at a time +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 9, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO 
BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint d69e1109-06be-4469-8876-4292dc7885d7 based on parent blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e blueprint source: planner with report: -planning report for blueprint 810ea95a-4730-43dd-867e-1984aeb9d873: -* discretionary zones placed: - * external_dns zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 -* zone updates waiting on discretionary zones +planning report for blueprint d69e1109-06be-4469-8876-4292dc7885d7: +* 1 out-of-date zone expunged: + * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus) +* 3 remaining out-of-date zones +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 based on parent blueprint d69e1109-06be-4469-8876-4292dc7885d7 +blueprint source: planner with report: +planning report for blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* 1 out-of-date zone expunged: + * sled 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus) +* 2 remaining out-of-date zones + + + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 based on parent blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 +blueprint source: planner with report: +planning report for blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* 1 out-of-date zone expunged: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus) +* 1 remaining out-of-date zone + + + + +> # Attempt to plan one more blueprint. +> # There should be no changes attempted here. 
+> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 based on parent blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 +blueprint source: planner with report: +planning report for blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts + -> # We ought to update the inventory for the final sled and then step through -> # the Nexus handoff process, but that work is still in progress. diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 7773d1f45f9..bce54bd528f 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -115,6 +115,10 @@ pub enum Error { NoAvailableZpool { sled_id: SledUuid, kind: ZoneKind }, #[error("no Nexus zones exist in parent blueprint")] NoNexusZonesInParentBlueprint, + #[error("no active Nexus zones exist in parent blueprint")] + NoActiveNexusZonesInParentBlueprint, + #[error("conflicting values for active Nexus zones in parent blueprint")] + ActiveNexusZoneGenerationConflictInParentBlueprint, #[error("no Boundary NTP zones exist in parent blueprint")] NoBoundaryNtpZonesInParentBlueprint, #[error( @@ -726,6 +730,10 @@ impl<'a> BlueprintBuilder<'a> { self.sled_editors.keys().copied() } + /// Iterates over all zones on a sled. + /// + /// This will include both zones from the parent blueprint, as well + /// as the changes made within this builder. 
pub fn current_sled_zones( &self, sled_id: SledUuid, @@ -740,6 +748,25 @@ impl<'a> BlueprintBuilder<'a> { Either::Right(editor.zones(filter)) } + /// Iterates over all zones on all sleds. + /// + /// Acts like a combination of [`Self::sled_ids_with_zones`] and + /// [`Self::current_sled_zones`]. + /// + /// This will include both zones from the parent blueprint, as well + /// as the changes made within this builder. + pub fn current_zones( + &'a self, + filter: F, + ) -> impl Iterator + where + F: FnMut(BlueprintZoneDisposition) -> bool + Clone, + { + self.sled_ids_with_zones().flat_map(move |sled_id| { + self.current_sled_zones(sled_id, filter.clone()) + }) + } + pub fn current_sled_disks( &self, sled_id: SledUuid, @@ -1537,6 +1564,7 @@ impl<'a> BlueprintBuilder<'a> { Ok(Ensure::Added) } + /// Adds a nexus zone on this sled. pub fn sled_add_zone_nexus( &mut self, sled_id: SledUuid, @@ -1565,6 +1593,7 @@ impl<'a> BlueprintBuilder<'a> { _ => None, }) .ok_or(Error::NoNexusZonesInParentBlueprint)?; + self.sled_add_zone_nexus_with_config( sled_id, external_tls, @@ -3337,7 +3366,7 @@ pub mod test { .map(|sa| sa.sled_id) .expect("no sleds present"), BlueprintZoneImageSource::InstallDataset, - Generation::new(), + parent.nexus_generation, ) .unwrap_err(); diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 69ed6a8bb48..31e4d1d61e8 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -4,6 +4,7 @@ //! Example blueprints +use std::collections::BTreeSet; use std::collections::HashMap; use std::fmt; use std::hash::Hash; @@ -27,6 +28,7 @@ use nexus_types::deployment::BlueprintArtifactVersion; use nexus_types::deployment::BlueprintHostPhase2DesiredContents; use nexus_types::deployment::BlueprintHostPhase2DesiredSlots; use nexus_types::deployment::BlueprintSource; +use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::ExpectedVersion; use nexus_types::deployment::OmicronZoneNic; use nexus_types::deployment::PlanningInput; @@ -695,6 +697,21 @@ impl ExampleSystemBuilder { } let blueprint = builder.build(BlueprintSource::Test); + + // Find and set the set of active Nexuses + let active_nexus_zone_ids: BTreeSet<_> = blueprint + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_, zone, nexus_zone)| { + if nexus_zone.nexus_generation == blueprint.nexus_generation { + Some(zone.id) + } else { + None + } + }) + .collect(); + input_builder.set_active_nexus_zones(active_nexus_zone_ids.clone()); + system.set_active_nexus_zones(active_nexus_zone_ids); + for sled_cfg in blueprint.sleds.values() { for zone in sled_cfg.zones.iter() { let service_id = zone.id; diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index c1c8a39a53c..885908c7208 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -32,6 +32,7 @@ use nexus_types::deployment::BlueprintSource; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneImageSource; +use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::CockroachDbClusterVersion; use nexus_types::deployment::CockroachDbPreserveDowngrade; use nexus_types::deployment::CockroachDbSettings; @@ -43,17 +44,19 @@ use nexus_types::deployment::SledFilter; use nexus_types::deployment::TufRepoContentsError; use 
nexus_types::deployment::ZpoolFilter; use nexus_types::deployment::{ - CockroachdbUnsafeToShutdown, PlanningAddStepReport, - PlanningCockroachdbSettingsStepReport, PlanningDecommissionStepReport, - PlanningExpungeStepReport, PlanningMgsUpdatesStepReport, + CockroachdbUnsafeToShutdown, NexusGenerationBumpWaitingOn, + PlanningAddStepReport, PlanningCockroachdbSettingsStepReport, + PlanningDecommissionStepReport, PlanningExpungeStepReport, + PlanningMgsUpdatesStepReport, PlanningNexusGenerationBumpReport, PlanningNoopImageSourceStepReport, PlanningReport, PlanningZoneUpdatesStepReport, ZoneAddWaitingOn, ZoneUnsafeToShutdown, - ZoneUpdatesWaitingOn, + ZoneUpdatesWaitingOn, ZoneWaitingToExpunge, }; use nexus_types::external_api::views::PhysicalDiskPolicy; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; +use omicron_common::api::external::Generation; use omicron_common::disk::M2Slot; use omicron_common::policy::BOUNDARY_NTP_REDUNDANCY; use omicron_common::policy::COCKROACHDB_REDUNDANCY; @@ -113,6 +116,41 @@ const NUM_CONCURRENT_MGS_UPDATES: usize = 1; /// A receipt that `check_input_validity` has been run prior to planning. struct InputChecked; +// Details of how a zone's status differs between the blueprint and the sled +// inventory +#[derive(Debug)] +#[expect(dead_code)] +struct ZonePropagationIncomplete<'a> { + zone_id: OmicronZoneUuid, + zone_kind: ZoneKind, + reason: ZonePropagationStatus<'a>, +} + +#[derive(Debug)] +#[expect(dead_code)] +enum ZonePropagationStatus<'a> { + // The current blueprint and the sled inventory disagree + // about the image source for a zone. + // + // This can mean that the sled inventory is out-of-date, or + // that a different blueprint has been applied. + ImageSourceMismatch { + bp_image_source: &'a BlueprintZoneImageSource, + inv_image_source: &'a OmicronZoneImageSource, + }, + // Although this zone appears in the blueprint, it does + // not exist on the sled's inventory. + MissingInInventory { + bp_image_source: &'a BlueprintZoneImageSource, + }, + // The last reconciliation attempt for this zone failed + ReconciliationError { + bp_image_source: &'a BlueprintZoneImageSource, + inv_image_source: &'a OmicronZoneImageSource, + message: &'a str, + }, +} + pub struct Planner<'a> { log: Logger, input: &'a PlanningInput, @@ -233,6 +271,10 @@ impl<'a> Planner<'a> { self.do_plan_zone_updates(&mgs_updates)? }; + // We may need to bump the top-level Nexus generation number + // to update Nexus zones. + let nexus_generation_bump = self.do_plan_nexus_generation_update()?; + // CockroachDB settings aren't dependent on zones, so they can be // planned independently of the rest of the system. let cockroachdb_settings = self.do_plan_cockroachdb_settings(); @@ -246,6 +288,7 @@ impl<'a> Planner<'a> { add, mgs_updates, zone_updates, + nexus_generation_bump, cockroachdb_settings, }) } @@ -973,58 +1016,119 @@ impl<'a> Planner<'a> { DiscretionaryOmicronZone::CruciblePantry, DiscretionaryOmicronZone::InternalDns, DiscretionaryOmicronZone::ExternalDns, - DiscretionaryOmicronZone::Nexus, DiscretionaryOmicronZone::Oximeter, + // Nexus only wants placement if no other zones are pending - leave + // it last in this list so it has visibility into the placement of + // other discretionary zones. 
+ DiscretionaryOmicronZone::Nexus, ] { - let num_zones_to_add = - self.num_additional_zones_needed(zone_kind, report); - if num_zones_to_add == 0 { - continue; - } - // We need to add at least one zone; construct our `zone_placement` - // (or reuse the existing one if a previous loop iteration already - // created it). - let zone_placement = zone_placement.get_or_insert_with(|| { - // This constructs a picture of the sleds as we currently - // understand them, as far as which sleds have discretionary - // zones. This will remain valid as we loop through the - // `zone_kind`s in this function, as any zone additions will - // update the `zone_placement` heap in-place. - let current_discretionary_zones = self - .input - .all_sled_resources(SledFilter::Discretionary) - .filter(|(sled_id, _)| { - !report.sleds_waiting_for_ntp_zone.contains(&sled_id) - }) - .map(|(sled_id, sled_resources)| { - OmicronZonePlacementSledState { - sled_id, - num_zpools: sled_resources - .all_zpools(ZpoolFilter::InService) - .count(), - discretionary_zones: self - .blueprint - .current_sled_zones( - sled_id, - BlueprintZoneDisposition::is_in_service, - ) - .filter_map(|zone| { - DiscretionaryOmicronZone::from_zone_type( - &zone.zone_type, + // Our goal here is to make sure that if we have less redundancy for + // discretionary zones than needed, we deploy additional zones. + // + // For most zone types, we only care about the total count of that + // kind of zone, regardless of image. In contrast, for Nexus, we may + // need to reach a minimum redundancy count for multiple zone images + // (new and old) during a handoff. + let image_sources = match zone_kind { + DiscretionaryOmicronZone::Nexus => { + let new_repo = self.input.tuf_repo().description(); + let new_image = + new_repo.zone_image_source(zone_kind.into())?; + + let mut images = vec![]; + let nexus_in_charge_image = + self.lookup_current_nexus_image()?; + + // Verify that all zones other than Nexus are updated and + // using the new image. Note that this is considering both + // zones from the parent blueprint, as well as zones + // that we might be adding in this current planning pass. + let all_non_nexus_zones_updated = + report.discretionary_zones_placed.is_empty() + && self.all_non_nexus_zones_using_new_image()?; + let new_image_is_different = + nexus_in_charge_image != new_image; + + // We may still want to deploy the old image alongside + // the new image: if we're running the "old version of a + // Nexus" currently, we need to ensure we have + // redundancy before the handoff completes. + images.push(nexus_in_charge_image); + + // If all other zones are using their new images, ensure we + // start Nexus zones from their new image. + // + // NOTE: Checking `all_non_nexus_zones_updated` shouldn't be + // strictly necessary! It should be fine to launch the new + // Nexus before other zone updates; due to Nexus handoff + // implementation, it should boot and remain idle. + if all_non_nexus_zones_updated && new_image_is_different { + images.push(new_image); + } + images + } + _ => { + vec![self.image_source_for_new_zone( + zone_kind.into(), + mgs_updates, + )?] + } + }; + + for image_source in image_sources { + let num_zones_to_add = self.num_additional_zones_needed( + zone_kind, + &image_source, + report, + ); + if num_zones_to_add == 0 { + continue; + } + // We need to add at least one zone; construct our `zone_placement` + // (or reuse the existing one if a previous loop iteration already + // created it). 
+ let zone_placement = zone_placement.get_or_insert_with(|| { + // This constructs a picture of the sleds as we currently + // understand them, as far as which sleds have discretionary + // zones. This will remain valid as we loop through the + // `zone_kind`s in this function, as any zone additions will + // update the `zone_placement` heap in-place. + let current_discretionary_zones = self + .input + .all_sled_resources(SledFilter::Discretionary) + .filter(|(sled_id, _)| { + !report.sleds_waiting_for_ntp_zone.contains(&sled_id) + }) + .map(|(sled_id, sled_resources)| { + OmicronZonePlacementSledState { + sled_id, + num_zpools: sled_resources + .all_zpools(ZpoolFilter::InService) + .count(), + discretionary_zones: self + .blueprint + .current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, ) - }) - .collect(), - } - }); - OmicronZonePlacement::new(current_discretionary_zones) - }); - self.add_discretionary_zones( - zone_placement, - zone_kind, - num_zones_to_add, - mgs_updates, - report, - )?; + .filter_map(|zone| { + DiscretionaryOmicronZone::from_zone_type( + &zone.zone_type, + ) + }) + .collect(), + } + }); + OmicronZonePlacement::new(current_discretionary_zones) + }); + self.add_discretionary_zones( + zone_placement, + zone_kind, + num_zones_to_add, + image_source, + report, + )?; + } } Ok(()) @@ -1034,7 +1138,8 @@ impl<'a> Planner<'a> { /// additional zones needed of the given `zone_kind` to satisfy the policy. fn num_additional_zones_needed( &mut self, - zone_kind: DiscretionaryOmicronZone, + discretionary_zone_kind: DiscretionaryOmicronZone, + image_source: &BlueprintZoneImageSource, report: &mut PlanningAddStepReport, ) -> usize { // Count the number of `kind` zones on all in-service sleds. This @@ -1043,7 +1148,7 @@ impl<'a> Planner<'a> { // decommissioned. 
let mut num_existing_kind_zones = 0; for sled_id in self.input.all_sled_ids(SledFilter::InService) { - let zone_kind = ZoneKind::from(zone_kind); + let zone_kind = ZoneKind::from(discretionary_zone_kind); // Internal DNS is special: if we have an expunged internal DNS zone // that might still be running, we want to count it here: we can't @@ -1058,11 +1163,20 @@ impl<'a> Planner<'a> { num_existing_kind_zones += self .blueprint .current_sled_zones(sled_id, disposition_filter) - .filter(|z| z.zone_type.kind() == zone_kind) + .filter(|z| { + let matches_kind = z.zone_type.kind() == zone_kind; + match discretionary_zone_kind { + DiscretionaryOmicronZone::Nexus => { + let matches_image = z.image_source == *image_source; + matches_kind && matches_image + } + _ => matches_kind, + } + }) .count(); } - let target_count = match zone_kind { + let target_count = match discretionary_zone_kind { DiscretionaryOmicronZone::BoundaryNtp => { self.input.target_boundary_ntp_zone_count() } @@ -1104,7 +1218,7 @@ impl<'a> Planner<'a> { target_count.saturating_sub(num_existing_kind_zones); if num_zones_to_add == 0 { report.sufficient_zones_exist( - ZoneKind::from(zone_kind).report_str(), + ZoneKind::from(discretionary_zone_kind).report_str(), target_count, num_existing_kind_zones, ); @@ -1122,7 +1236,7 @@ impl<'a> Planner<'a> { zone_placement: &mut OmicronZonePlacement, kind: DiscretionaryOmicronZone, num_zones_to_add: usize, - mgs_updates: &PlanningMgsUpdatesStepReport, + image_source: BlueprintZoneImageSource, report: &mut PlanningAddStepReport, ) -> Result<(), Error> { for i in 0..num_zones_to_add { @@ -1142,9 +1256,7 @@ impl<'a> Planner<'a> { } }; - let image = - self.image_source_for_new_zone(kind.into(), mgs_updates)?; - let image_source = image.clone(); + let image = image_source.clone(); match kind { DiscretionaryOmicronZone::BoundaryNtp => { self.blueprint.sled_promote_internal_ntp_to_boundary_ntp( @@ -1173,16 +1285,12 @@ impl<'a> Planner<'a> { self.blueprint.sled_add_zone_external_dns(sled_id, image)? } DiscretionaryOmicronZone::Nexus => { - // TODO: omicron#8936 will fix this to determine the - // appropriate generation to use here. For now, we use the - // current generation. That does mean that in the middle of - // a handoff, we'll deploy the *new* version, but that's a - // very unlikely case to get here and will be fixed by - // omicron#8936. + let nexus_generation = + self.determine_nexus_generation(&image)?; self.blueprint.sled_add_zone_nexus( sled_id, image, - self.blueprint.nexus_generation(), + nexus_generation, )? } DiscretionaryOmicronZone::Oximeter => { @@ -1199,6 +1307,64 @@ impl<'a> Planner<'a> { Ok(()) } + // Determines the appropriate generation number for a new Nexus zone. + // This generation is based on the generation number used by existing + // Nexus zones. + // + // The logic is: + // - If any existing Nexus zone has the same image source, reuse its generation + // - Otherwise, use the highest existing generation + 1 + // - If no existing zones exist, return an error + // + // This function also validates that the determined generation matches the + // top-level current blueprint generation. + fn determine_nexus_generation( + &self, + image_source: &BlueprintZoneImageSource, + ) -> Result { + // If any other Nexus in the blueprint has the same image source, + // use it. Otherwise, use the highest generation number + 1. 
+ let mut highest_seen_generation = None; + let mut same_image_nexus_generation = None; + + // Iterate over both existing zones and ones that are actively being placed. + for (zone, nexus) in self + .blueprint + .current_zones(BlueprintZoneDisposition::any) + .filter_map(|z| match &z.zone_type { + BlueprintZoneType::Nexus(nexus) => Some((z, nexus)), + _ => None, + }) + { + if zone.image_source == *image_source { + // If the image matches exactly, use it. + same_image_nexus_generation = Some(nexus.nexus_generation); + break; + } else if let Some(gen) = highest_seen_generation { + // Otherwise, use the generation number if it's the highest + // we've seen + if nexus.nexus_generation > gen { + highest_seen_generation = Some(nexus.nexus_generation); + } + } else { + // Use it regardless if it's the first generation number we've + // seen + highest_seen_generation = Some(nexus.nexus_generation); + } + } + + let determined_generation = match same_image_nexus_generation { + Some(gen) => Some(gen), + None => highest_seen_generation.map(|gen| gen.next()), + }; + + let Some(gen) = determined_generation else { + return Err(Error::NoNexusZonesInParentBlueprint); + }; + + Ok(gen) + } + /// Update at most one MGS-managed device (SP, RoT, etc.), if any are out of /// date. fn do_plan_mgs_updates( @@ -1274,13 +1440,12 @@ impl<'a> Planner<'a> { Ok(PlanningMgsUpdatesStepReport::new(pending_updates)) } - /// Update at most one existing zone to use a new image source. - fn do_plan_zone_updates( - &mut self, - mgs_updates: &PlanningMgsUpdatesStepReport, - ) -> Result { - let mut report = PlanningZoneUpdatesStepReport::new(); - + // Returns the zones which appear in the blueprint on commissioned sleds, + // but which have not been reported by the latest reconciliation result from + // inventory. + fn get_zones_not_yet_propagated_to_inventory( + &self, + ) -> Vec> { // We are only interested in non-decommissioned sleds. let sleds = self .input @@ -1295,31 +1460,7 @@ impl<'a> Planner<'a> { .map(|(z, sa_result)| (z.id, (&z.image_source, sa_result))) .collect::>(); - #[derive(Debug)] - #[expect(dead_code)] - struct ZoneCurrentlyUpdating<'a> { - zone_id: OmicronZoneUuid, - zone_kind: ZoneKind, - reason: UpdatingReason<'a>, - } - - #[derive(Debug)] - #[expect(dead_code)] - enum UpdatingReason<'a> { - ImageSourceMismatch { - bp_image_source: &'a BlueprintZoneImageSource, - inv_image_source: &'a OmicronZoneImageSource, - }, - MissingInInventory { - bp_image_source: &'a BlueprintZoneImageSource, - }, - ReconciliationError { - bp_image_source: &'a BlueprintZoneImageSource, - inv_image_source: &'a OmicronZoneImageSource, - message: &'a str, - }, - } - + let mut updating = vec![]; for &sled_id in &sleds { // Build a list of zones currently in the blueprint but where // inventory has a mismatch or does not know about the zone. @@ -1327,7 +1468,7 @@ impl<'a> Planner<'a> { // What about the case where a zone is in inventory but not in the // blueprint? See // https://github.com/oxidecomputer/omicron/issues/8589. - let zones_currently_updating = self + let mut zones_currently_updating = self .blueprint .current_sled_zones( sled_id, @@ -1350,13 +1491,14 @@ impl<'a> Planner<'a> { ConfigReconcilerInventoryResult::Ok, )) => { // The inventory and blueprint image sources differ. 
- Some(ZoneCurrentlyUpdating { + Some(ZonePropagationIncomplete { zone_id: zone.id, zone_kind: zone.kind(), - reason: UpdatingReason::ImageSourceMismatch { - bp_image_source: &zone.image_source, - inv_image_source, - }, + reason: + ZonePropagationStatus::ImageSourceMismatch { + bp_image_source: &zone.image_source, + inv_image_source, + }, }) } Some(( @@ -1366,44 +1508,136 @@ impl<'a> Planner<'a> { // The inventory reports this zone but there was an // error reconciling it (most likely an error // starting the zone). - Some(ZoneCurrentlyUpdating { + Some(ZonePropagationIncomplete { zone_id: zone.id, zone_kind: zone.kind(), - reason: UpdatingReason::ReconciliationError { - bp_image_source: &zone.image_source, - inv_image_source, - message, - }, + reason: + ZonePropagationStatus::ReconciliationError { + bp_image_source: &zone.image_source, + inv_image_source, + message, + }, }) } None => { // The blueprint has a zone that inventory does not have. - Some(ZoneCurrentlyUpdating { + Some(ZonePropagationIncomplete { zone_id: zone.id, zone_kind: zone.kind(), - reason: UpdatingReason::MissingInInventory { - bp_image_source: &zone.image_source, - }, + reason: + ZonePropagationStatus::MissingInInventory { + bp_image_source: &zone.image_source, + }, }) } } }) .collect::>(); + updating.append(&mut zones_currently_updating); + } + updating + } - if !zones_currently_updating.is_empty() { - info!( - self.log, "some zones not yet up-to-date"; - "sled_id" => %sled_id, - "zones_currently_updating" => ?zones_currently_updating, - ); - return Ok(report); - } + /// Update at most one existing zone to use a new image source. + fn do_plan_zone_updates( + &mut self, + mgs_updates: &PlanningMgsUpdatesStepReport, + ) -> Result { + let mut report = PlanningZoneUpdatesStepReport::new(); + + let zones_currently_updating = + self.get_zones_not_yet_propagated_to_inventory(); + if !zones_currently_updating.is_empty() { + info!( + self.log, "some zones not yet up-to-date"; + "zones_currently_updating" => ?zones_currently_updating, + ); + return Ok(report); + } + + // Find the zones with out-of-date images + let out_of_date_zones = self.get_out_of_date_zones(); + for (sled_id, zone, desired_image) in out_of_date_zones.iter() { + report.out_of_date_zone(*sled_id, zone, desired_image.clone()); + } + + // Of the out-of-date zones, filter out zones that can't be updated yet, + // either because they're not ready or because it wouldn't be safe to + // bounce them. + let (nexus_updateable_zones, non_nexus_updateable_zones): ( + Vec<_>, + Vec<_>, + ) = out_of_date_zones + .into_iter() + .filter(|(_, zone, _)| { + self.are_zones_ready_for_updates(mgs_updates) + && self.can_zone_be_shut_down_safely(&zone, &mut report) + }) + .partition(|(_, zone, _)| zone.zone_type.is_nexus()); + + // Try to update the first non-Nexus zone + if let Some((sled_id, zone, new_image_source)) = + non_nexus_updateable_zones.first() + { + return self.update_or_expunge_zone( + *sled_id, + zone, + new_image_source.clone(), + report, + ); + } + + // If the only remaining out-of-date zones are Nexus, verify that + // handoff has occurred before attempting to expunge them. + // + // We intentionally order this after other zone updates to minimize + // the window where we might report "waiting to update Nexus" if + // handoff has not occurred yet, and we iterate over all Nexus + // zones with out-of-date images to fill out the planning report. 
+ let nexus_updateable_zones = nexus_updateable_zones + .into_iter() + .filter_map(|(sled, zone, image)| { + match self.should_nexus_zone_be_expunged(&zone, &mut report) { + Ok(true) => Some(Ok((sled, zone, image))), + Ok(false) => None, + Err(err) => Some(Err(err)), + } + }) + .collect::, Error>>()?; + + if let Some((sled_id, zone, new_image_source)) = + nexus_updateable_zones.first() + { + return self.update_or_expunge_zone( + *sled_id, + zone, + new_image_source.clone(), + report, + ); } + // No zones to update. + Ok(report) + } + + // Returns zones that should (eventually) be updated because their image + // appears different in the target release. + // + // Does not consider whether or not it's safe to shut down these zones. + fn get_out_of_date_zones( + &self, + ) -> Vec<(SledUuid, BlueprintZoneConfig, BlueprintZoneImageSource)> { + // We are only interested in non-decommissioned sleds. + let sleds = self + .input + .all_sleds(SledFilter::Commissioned) + .map(|(id, _details)| id) + .collect::>(); + let target_release = self.input.tuf_repo().description(); + // Find out of date zones, as defined by zones whose image source does // not match what it should be based on our current target release. - let target_release = self.input.tuf_repo().description(); - let out_of_date_zones = sleds + sleds .into_iter() .flat_map(|sled_id| { let log = &self.log; @@ -1437,54 +1671,7 @@ impl<'a> Planner<'a> { } }) }) - .collect::>(); - - for (sled_id, zone, desired_image) in out_of_date_zones.iter() { - report.out_of_date_zone(*sled_id, zone, desired_image.clone()); - } - - // Of the out-of-date zones, filter out zones that can't be updated yet, - // either because they're not ready or because it wouldn't be safe to - // bounce them. - let mut updateable_zones = out_of_date_zones.iter().filter( - |(_sled_id, zone, _new_image_source)| { - if !self.can_zone_be_shut_down_safely(zone, &mut report) { - return false; - } - match self.is_zone_ready_for_update( - zone.zone_type.kind(), - mgs_updates, - ) { - Ok(true) => true, - Ok(false) => false, - Err(err) => { - // If we can't tell whether a zone is ready for update, - // assume it can't be. - warn!( - self.log, - "cannot determine whether zone is ready for update"; - "zone" => ?zone, - InlineErrorChain::new(&err), - ); - false - } - } - }, - ); - - if let Some((sled_id, zone, new_image_source)) = updateable_zones.next() - { - // Update the first out-of-date zone. - self.update_or_expunge_zone( - *sled_id, - zone, - new_image_source.clone(), - report, - ) - } else { - // No zones to update. - Ok(report) - } + .collect::>() } /// Update a zone to use a new image source, either in-place or by @@ -1773,6 +1960,141 @@ impl<'a> Planner<'a> { Ok(reasons) } + // Determines whether or not the top-level "nexus_generation" + // value should be increased. + // + // Doing so will be a signal for all running Nexus instances at + // lower versions to start quiescing, and to perform handoff. + fn do_plan_nexus_generation_update( + &mut self, + ) -> Result { + let mut report = PlanningNexusGenerationBumpReport::new(); + + // Nexus can only be updated if all non-Nexus zones have been + // updated, i.e., their image source is an artifact from the new + // repo. + let new_repo = self.input.tuf_repo().description(); + + // If we don't actually have a TUF repo here, we can't do + // updates anyway; any return value is fine. 
+ if new_repo.tuf_repo().is_none() { + return Ok(report); + } + + // Check that all in-service zones (other than Nexus) on all + // sleds have an image source consistent with `new_repo`. + if !self.all_non_nexus_zones_using_new_image()? { + report.set_waiting_on( + NexusGenerationBumpWaitingOn::FoundOldNonNexusZones, + ); + return Ok(report); + } + + // In order to do a handoff, there must be Nexus instances at the + // proposed generation number. These Nexuses must also have records in + // "db_metadata_nexus" (this is verified by checking that new Nexuses + // have entries in "self.input.not_yet_nexus_zones"). + let current_generation = self.blueprint.nexus_generation(); + let proposed_generation = self.blueprint.nexus_generation().next(); + let mut old_nexuses_at_current_gen = 0; + let mut nexuses_at_proposed_gen = 0; + let mut nexuses_at_proposed_gen_missing_metadata_record = 0; + for sled_id in self.blueprint.sled_ids_with_zones() { + for z in self.blueprint.current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) { + if let BlueprintZoneType::Nexus(nexus_zone) = &z.zone_type { + if nexus_zone.nexus_generation == proposed_generation { + nexuses_at_proposed_gen += 1; + if !self.input.not_yet_nexus_zones().contains(&z.id) { + nexuses_at_proposed_gen_missing_metadata_record += + 1; + } + } + + if nexus_zone.nexus_generation == current_generation + && z.image_source + != new_repo.zone_image_source(z.zone_type.kind())? + { + old_nexuses_at_current_gen += 1; + } + } + } + } + + if old_nexuses_at_current_gen == 0 { + // If all the current-generation Nexuses are "up-to-date", then we + // have completed a handoff successfully. + // + // In this case, there's nothing to report. + // + // Note that we'll continue to hit this case until the next update + // starts (in other words, when `new_repo` changes). + return Ok(report); + } + + if nexuses_at_proposed_gen < self.input.target_nexus_zone_count() { + // If there aren't enough Nexuses at the next generation, quiescing + // could be a dangerous operation. Blueprint execution should be + // able to continue even if the new Nexuses haven't started, but to + // be conservative, we'll wait for the target count. + report.set_waiting_on( + NexusGenerationBumpWaitingOn::MissingNewNexusInBlueprint, + ); + return Ok(report); + } + + if nexuses_at_proposed_gen_missing_metadata_record > 0 { + // There are enough Nexuses at the target generation, but not all of + // them have records yet. Blueprint execution should fix this, by + // creating these records.
+ report.set_waiting_on( + NexusGenerationBumpWaitingOn::MissingNexusDatabaseAccessRecords, + ); + return Ok(report); + } + + // Confirm that all blueprint zones have propagated to inventory + let zones_currently_updating = + self.get_zones_not_yet_propagated_to_inventory(); + if !zones_currently_updating.is_empty() { + info!( + self.log, "some zones not yet up-to-date"; + "zones_currently_updating" => ?zones_currently_updating, + ); + report.set_waiting_on( + NexusGenerationBumpWaitingOn::MissingNewNexusInInventory, + ); + return Ok(report); + } + + // If we're here: + // - There's a new repo + // - The current generation of Nexuses are not running an image from the + // new repo (which means they are "older" from the perspective of the + // update system) + // - There are Nexuses running with "current generation + 1" + // - Those new Nexuses have database metadata records that will let + // them boot successfully + // - All non-Nexus zones have updated (i.e., are running images from the + // new repo) + // - All other blueprint zones have propagated to inventory + // + // If all of these are true, the "zone update" portion of the planner + // has completed, aside from Nexus, and we're ready for old Nexuses + // to start quiescing. + // + // Blueprint planning and execution will be able to continue past this + // point, for the purposes of restoring redundancy, expunging sleds, + // etc. However, making this commitment will also halt the creation of + // new sagas temporarily, as handoff from old to new Nexuses occurs. + self.blueprint.set_nexus_generation(proposed_generation); + report.set_next_generation(proposed_generation); + + Ok(report) + } + fn do_plan_cockroachdb_settings( &mut self, ) -> PlanningCockroachdbSettingsStepReport { @@ -1873,63 +2195,127 @@ impl<'a> Planner<'a> { zone_kind: ZoneKind, mgs_updates: &PlanningMgsUpdatesStepReport, ) -> Result { - let source_repo = - if self.is_zone_ready_for_update(zone_kind, mgs_updates)? { - self.input.tuf_repo().description() - } else { - self.input.old_repo().description() - }; + let source_repo = if self.are_zones_ready_for_updates(mgs_updates) { + self.input.tuf_repo().description() + } else { + self.input.old_repo().description() + }; source_repo.zone_image_source(zone_kind) } - /// Return `true` iff a zone of the given kind is ready to be updated; - /// i.e., its dependencies have been updated. - fn is_zone_ready_for_update( + /// Return `true` iff a zone is ready to be updated; i.e., its dependencies + /// have been updated. + fn are_zones_ready_for_updates( + &self, - zone_kind: ZoneKind, mgs_updates: &PlanningMgsUpdatesStepReport, - ) -> Result { - // We return false regardless of `zone_kind` if there are still + ) -> bool { + // We return false for all zone kinds if there are still // pending updates for components earlier in the update ordering // than zones: RoT bootloader / RoT / SP / Host OS. - if !mgs_updates.is_empty() { - return Ok(false); - } + mgs_updates.is_empty() + } - match zone_kind { - ZoneKind::Nexus => { - // Nexus can only be updated if all non-Nexus zones have been - // updated, i.e., their image source is an artifact from the new - // repo. - let new_repo = self.input.tuf_repo().description(); - - // If we don't actually have a TUF repo here, we can't do - // updates anyway; any return value is fine.
- if new_repo.tuf_repo().is_none() { + fn all_non_nexus_zones_using_new_image(&self) -> Result { + let new_repo = self.input.tuf_repo().description(); + for sled_id in self.blueprint.sled_ids_with_zones() { + for z in self.blueprint.current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) { + let kind = z.zone_type.kind(); + if kind != ZoneKind::Nexus + && z.image_source != new_repo.zone_image_source(kind)? + { return Ok(false); } + } + } + return Ok(true); + } - // Check that all in-service zones (other than Nexus) on all - // sleds have an image source consistent with `new_repo`. - for sled_id in self.blueprint.sled_ids_with_zones() { - for z in self.blueprint.current_sled_zones( - sled_id, - BlueprintZoneDisposition::is_in_service, - ) { - let kind = z.zone_type.kind(); - if kind != ZoneKind::Nexus - && z.image_source - != new_repo.zone_image_source(kind)? - { - return Ok(false); - } - } + fn lookup_current_nexus_image( + &self, + ) -> Result { + // Look up the active Nexus zone in the blueprint to get its image + if let Some(image) = self + .blueprint + .parent_blueprint() + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .find_map(|(_, blueprint_zone)| { + if self.input.active_nexus_zones().contains(&blueprint_zone.id) + { + Some(blueprint_zone.image_source.clone()) + } else { + None } + }) + { + Ok(image) + } else { + Err(Error::NoActiveNexusZonesInParentBlueprint) + } + } + + fn lookup_current_nexus_generation(&self) -> Result { + // Look up the active Nexus zone in the blueprint to get its generation + self.blueprint + .parent_blueprint() + .find_generation_for_nexus(self.input.active_nexus_zones()) + .map_err(|_| Error::NoActiveNexusZonesInParentBlueprint)? + .ok_or(Error::NoActiveNexusZonesInParentBlueprint) + } - Ok(true) + // Returns whether the out-of-date Nexus zone is ready to be updated. + // + // For reporting purposes, we assume that we want the supplied + // zone to be expunged or updated because it is out-of-date. + // + // If the zone should not be updated yet, updates the planner report to + // identify why it is not ready for update. + // + // Precondition: zone must be a Nexus zone and be running an out-of-date + // image + fn should_nexus_zone_be_expunged( + &self, + zone: &BlueprintZoneConfig, + report: &mut PlanningZoneUpdatesStepReport, + ) -> Result { + let zone_nexus_generation = match &zone.zone_type { + // For Nexus, we're only ready to "update" this zone once control + // has been handed off to a newer generation of Nexus zones. (Once + // that happens, we're not really going to update this zone, just + // expunge it.) + BlueprintZoneType::Nexus(nexus_zone) => { + // Get the nexus_generation of the zone being considered for shutdown + nexus_zone.nexus_generation } - _ => Ok(true), // other zone kinds have no special dependencies + _ => panic!("Not a Nexus zone"), + }; + + // Get the generation of the currently-executing Nexus zones. + // + // This presumably includes the currently-executing Nexus where + // this logic is being considered. + let current_gen = self.lookup_current_nexus_generation()?; + + // We need to prevent old Nexus zones from shutting themselves + // down. In other words: it's only safe to shut down if handoff + // has occurred. + // + // That only happens when the current generation of Nexus (the + // one running right now) does not match the zone we're + // considering expunging. 
+ if current_gen == zone_nexus_generation { + report.waiting_zone( + zone, + ZoneWaitingToExpunge::Nexus { + zone_generation: zone_nexus_generation, + }, + ); + return Ok(false); } + + Ok(true) } /// Return `true` iff we believe a zone can safely be shut down; e.g., any @@ -2154,6 +2540,7 @@ pub(crate) mod test { use nexus_types::deployment::OmicronZoneExternalSnatIp; use nexus_types::deployment::SledDisk; use nexus_types::deployment::TargetReleaseDescription; + use nexus_types::deployment::TufRepoPolicy; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::blueprint_zone_type::InternalDns; use nexus_types::external_api::views::PhysicalDiskState; @@ -2200,11 +2587,29 @@ pub(crate) mod test { ClickhousePolicy { version: 0, mode, time_created: Utc::now() } } - pub(crate) fn assert_planning_makes_no_changes( - log: &Logger, + fn get_nexus_ids_at_generation( blueprint: &Blueprint, - input: &PlanningInput, - collection: &Collection, + generation: Generation, + ) -> BTreeSet { + blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_, z)| match &z.zone_type { + BlueprintZoneType::Nexus(nexus_zone) + if nexus_zone.nexus_generation == generation => + { + Some(z.id) + } + _ => None, + }) + .collect::>() + } + + #[track_caller] + pub(crate) fn assert_planning_makes_no_changes( + log: &Logger, + blueprint: &Blueprint, + input: &PlanningInput, + collection: &Collection, test_name: &'static str, ) { let planner = Planner::new_based_on( @@ -2216,7 +2621,8 @@ pub(crate) mod test { PlannerRng::from_entropy(), ) .expect("created planner"); - let child_blueprint = planner.plan().expect("planning succeeded"); + let child_blueprint = + planner.plan().expect("planning should have succeeded"); verify_blueprint(&child_blueprint); let summary = child_blueprint.diff_since_blueprint(&blueprint); eprintln!( @@ -3249,7 +3655,10 @@ pub(crate) mod test { .resources .zpools .iter_mut() - .next() + // Skip over the first disk - this is the one which hosts + // many of our zones, like Nexus, and is more complicated + // to expunge. + .nth(1) .unwrap() .1; expunged_disk.policy = PhysicalDiskPolicy::Expunged; @@ -3579,15 +3988,20 @@ pub(crate) mod test { // Create an example system with a single sled let (example, blueprint1) = - ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build(); + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nsleds(1) + .nexus_count(2) + .build(); let collection = example.collection; let input = example.input; let mut builder = input.into_builder(); // Aside: Avoid churning on the quantity of Nexus zones - we're okay - // staying at one. - builder.policy_mut().target_nexus_zone_count = 1; + // staying at two. + // + // Force two so that we can't expunge our way down to zero.
+ builder.policy_mut().target_nexus_zone_count = 2; // Find whatever pool NTP was using let pool_to_expunge = blueprint1 @@ -4196,7 +4610,7 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); let (example, bp1) = - ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(0).build(); + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build(); let collection = example.collection; let input = example.input; let mut builder = input.into_builder(); @@ -5598,6 +6012,32 @@ pub(crate) mod test { } example.collection = example.system.to_collection_builder().unwrap().build(); + + update_input_with_nexus_at_generation( + example, + blueprint, + blueprint.nexus_generation, + ) + } + + fn update_input_with_nexus_at_generation( + example: &mut ExampleSystem, + blueprint: &Blueprint, + active_generation: Generation, + ) { + let active_nexus_zones = + get_nexus_ids_at_generation(&blueprint, active_generation); + let not_yet_nexus_zones = + get_nexus_ids_at_generation(&blueprint, active_generation.next()); + + let mut input = std::mem::replace( + &mut example.input, + nexus_types::deployment::PlanningInputBuilder::empty_input(), + ) + .into_builder(); + input.set_active_nexus_zones(active_nexus_zones); + input.set_not_yet_nexus_zones(not_yet_nexus_zones); + example.input = input.build(); } macro_rules! fake_zone_artifact { @@ -5639,8 +6079,8 @@ pub(crate) mod test { /// Ensure that dependent zones (here just Crucible Pantry) are updated /// before Nexus. #[test] - fn test_update_crucible_pantry() { - static TEST_NAME: &str = "update_crucible_pantry"; + fn test_update_crucible_pantry_before_nexus() { + static TEST_NAME: &str = "update_crucible_pantry_before_nexus"; let logctx = test_setup_log(TEST_NAME); let log = logctx.log.clone(); @@ -5742,101 +6182,105 @@ pub(crate) mod test { }; } - // Request another Nexus zone. - input_builder.policy_mut().target_nexus_zone_count = - input_builder.policy_mut().target_nexus_zone_count + 1; - let input = input_builder.build(); + let expected_new_nexus_zones = + input_builder.policy_mut().target_nexus_zone_count; + let expected_pantries = + input_builder.policy_mut().target_crucible_pantry_zone_count; + example.input = input_builder.build(); - // Check that there is a new nexus zone that does *not* use the new - // artifact (since not all of its dependencies are updated yet). - update_collection_from_blueprint(&mut example, &blueprint1); - let blueprint2 = Planner::new_based_on( - log.clone(), - &blueprint1, - &input, - "test_blueprint3", - &example.collection, - PlannerRng::from_seed((TEST_NAME, "bp3")), - ) - .expect("can't create planner") - .plan() - .expect("can't re-plan for new Nexus zone"); - { - let summary = blueprint2.diff_since_blueprint(&blueprint1); - for sled in summary.diff.sleds.modified_values_diff() { - assert!(sled.zones.removed.is_empty()); - assert_eq!(sled.zones.added.len(), 1); - let added = sled.zones.added.values().next().unwrap(); - assert!(matches!( - &added.zone_type, - BlueprintZoneType::Nexus(_) - )); - assert!(matches!( - &added.image_source, - BlueprintZoneImageSource::InstallDataset - )); - } - } + // We should now have iterations of expunge/cleanup/add iterations for + // the Crucible Pantry zones. + let mut parent = blueprint1; + + let mut old_pantries = expected_pantries; + let mut expunging_pantries = 0; + let mut added_pantries = 0; + let mut i = 0; - // We should now have three sets of expunge/add iterations for the - // Crucible Pantry zones. 
- let mut parent = blueprint2; - for i in 3..=8 { - let blueprint_name = format!("blueprint_{i}"); + while old_pantries > 0 + || expunging_pantries > 0 + || added_pantries != expected_pantries + { + let blueprint_name = format!("expunging_crucible_pantry_{i}"); + i += 1; update_collection_from_blueprint(&mut example, &parent); let blueprint = Planner::new_based_on( log.clone(), &parent, - &input, + &example.input, &blueprint_name, &example.collection, PlannerRng::from_seed((TEST_NAME, &blueprint_name)), ) .expect("can't create planner") .plan() - .unwrap_or_else(|_| panic!("can't re-plan after {i} iterations")); + .unwrap_or_else(|err| { + panic!("can't re-plan: {}", InlineErrorChain::new(&err)) + }); { let summary = blueprint.diff_since_blueprint(&parent); eprintln!("diff to {blueprint_name}: {}", summary.display()); for sled in summary.diff.sleds.modified_values_diff() { - if i % 2 == 1 { - assert!(sled.zones.added.is_empty()); - assert!(sled.zones.removed.is_empty()); - assert_eq!( - sled.zones - .common - .iter() - .filter(|(_, z)| matches!( - z.after.zone_type, - BlueprintZoneType::CruciblePantry(_) - ) && matches!( - z.after.disposition, - BlueprintZoneDisposition::Expunged { .. } - )) - .count(), - 1 - ); - } else { - assert!(sled.zones.removed.is_empty()); - assert_eq!(sled.zones.added.len(), 1); - let added = sled.zones.added.values().next().unwrap(); + assert!(sled.zones.removed.is_empty()); + + for modified_zone in sled.zones.modified_values_diff() { assert!(matches!( - &added.zone_type, + *modified_zone.zone_type.before, BlueprintZoneType::CruciblePantry(_) )); - assert_eq!(added.image_source, image_source); + // If the zone was previously in-service, it gets + // expunged. + if modified_zone.disposition.before.is_in_service() { + assert_eq!( + *modified_zone.image_source.before, + BlueprintZoneImageSource::InstallDataset + ); + assert!( + modified_zone.disposition.after.is_expunged(), + ); + old_pantries -= 1; + expunging_pantries += 1; + } + + // If the zone was previously expunged and not ready for + // cleanup, it should be marked ready-for-cleanup + if modified_zone.disposition.before.is_expunged() + && !modified_zone + .disposition + .before + .is_ready_for_cleanup() + { + assert!( + modified_zone + .disposition + .after + .is_ready_for_cleanup(), + ); + + expunging_pantries -= 1; + } + } + + for (_, zone) in &sled.zones.added { + match zone.zone_type { + BlueprintZoneType::CruciblePantry(_) => { + assert_eq!(zone.image_source, image_source); + added_pantries += 1; + } + _ => panic!("Unexpected zone add: {zone:?}"), + } } } } parent = blueprint; } - let blueprint8 = parent; + let mut blueprint = parent; // All Crucible Pantries should now be updated. assert_eq!( - blueprint8 + blueprint .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .filter(|(_, z)| is_up_to_date_pantry(z)) .count(), @@ -5845,31 +6289,108 @@ pub(crate) mod test { // All old Pantry zones should now be expunged. assert_eq!( - blueprint8 + blueprint .all_omicron_zones(BlueprintZoneDisposition::is_expunged) .filter(|(_, z)| is_old_pantry(z)) .count(), CRUCIBLE_PANTRY_REDUNDANCY ); + // Nexus should deploy new zones, but keep the old ones running. 
+ update_collection_from_blueprint(&mut example, &blueprint); + let new_blueprint = Planner::new_based_on( + log.clone(), + &blueprint, + &example.input, + "test_blueprint_new_nexus", + &example.collection, + PlannerRng::from_seed((TEST_NAME, "test_blueprint_new_nexus")), + ) + .expect("Can't create planner") + .plan() + .expect("Can't re-plan for new Nexus zones"); + { + let summary = new_blueprint.diff_since_blueprint(&blueprint); + let mut modified_sleds = 0; + for sled in summary.diff.sleds.modified_values_diff() { + assert!(sled.zones.removed.is_empty()); + assert_eq!(sled.zones.added.len(), 1); + let added = sled.zones.added.values().next().unwrap(); + let BlueprintZoneType::Nexus(nexus_zone) = &added.zone_type + else { + panic!("Unexpected zone type: {:?}", added.zone_type); + }; + assert_eq!( + nexus_zone.nexus_generation, + Generation::new().next() + ); + assert_eq!(&added.image_source, &image_source); + modified_sleds += 1; + } + assert_eq!(modified_sleds, expected_new_nexus_zones); + } + blueprint = new_blueprint; + // Now we can update Nexus, because all of its dependent zones // are up-to-date w/r/t the new repo. - assert_eq!( - blueprint8 - .all_omicron_zones(BlueprintZoneDisposition::is_in_service) - .filter(|(_, z)| is_old_nexus(z)) - .count(), - NEXUS_REDUNDANCY + 1, - ); - let mut parent = blueprint8; - for i in 9..=16 { - update_collection_from_blueprint(&mut example, &parent); + // + // First, we'll expect the nexus generation to get bumped. + let active_nexus_zones = + get_nexus_ids_at_generation(&blueprint, Generation::new()); + let not_yet_nexus_zones = + get_nexus_ids_at_generation(&blueprint, Generation::new().next()); + + assert_eq!(active_nexus_zones.len(), NEXUS_REDUNDANCY); + assert_eq!(not_yet_nexus_zones.len(), NEXUS_REDUNDANCY); + + update_collection_from_blueprint(&mut example, &blueprint); + + // This is a replacement for the reconfigurator executor, which + // would normally propagate records for these zones into the + // database. 
+ let mut input = std::mem::replace( + &mut example.input, + nexus_types::deployment::PlanningInputBuilder::empty_input(), + ) + .into_builder(); + input.set_active_nexus_zones(active_nexus_zones); + input.set_not_yet_nexus_zones(not_yet_nexus_zones); + example.input = input.build(); + let blueprint_name = "blueprint_to_bump_nexus_gen".to_string(); + let new_blueprint = Planner::new_based_on( + log.clone(), + &blueprint, + &example.input, + &blueprint_name, + &example.collection, + PlannerRng::from_seed((TEST_NAME, &blueprint_name)), + ) + .expect("can't create planner") + .plan() + .unwrap_or_else(|_| panic!("can't re-plan")); + { + let summary = new_blueprint.diff_since_blueprint(&blueprint); + assert!( + summary.has_changes(), + "Should have bumped nexus generation" + ); + assert_eq!( + summary.diff.nexus_generation.before.next(), + *summary.diff.nexus_generation.after, + ); + assert!(summary.diff.sleds.modified_values_diff().next().is_none()); + } + blueprint = new_blueprint; + + let mut parent = blueprint; + for i in 9..=12 { + update_collection_from_blueprint(&mut example, &parent); let blueprint_name = format!("blueprint{i}"); let blueprint = Planner::new_based_on( log.clone(), &parent, - &input, + &example.input, &blueprint_name, &example.collection, PlannerRng::from_seed((TEST_NAME, &blueprint_name)), @@ -5880,41 +6401,72 @@ pub(crate) mod test { { let summary = blueprint.diff_since_blueprint(&parent); + assert!(summary.has_changes(), "No changes at iteration {i}"); for sled in summary.diff.sleds.modified_values_diff() { - if i % 2 == 1 { - assert!(sled.zones.added.is_empty()); - assert!(sled.zones.removed.is_empty()); - } else { - assert!(sled.zones.removed.is_empty()); - assert_eq!(sled.zones.added.len(), 1); - let added = sled.zones.added.values().next().unwrap(); + assert!(sled.zones.added.is_empty()); + assert!(sled.zones.removed.is_empty()); + for modified_zone in sled.zones.modified_values_diff() { + // We're only modifying Nexus zones on the old image assert!(matches!( - &added.zone_type, + *modified_zone.zone_type.before, BlueprintZoneType::Nexus(_) )); - assert_eq!(added.image_source, image_source); + assert_eq!( + *modified_zone.image_source.before, + BlueprintZoneImageSource::InstallDataset + ); + + // If the zone was previously in-service, it gets + // expunged. + if modified_zone.disposition.before.is_in_service() { + assert!( + modified_zone.disposition.after.is_expunged(), + ); + } + + // If the zone was previously expunged and not ready for + // cleanup, it should be marked ready-for-cleanup + if modified_zone.disposition.before.is_expunged() + && !modified_zone + .disposition + .before + .is_ready_for_cleanup() + { + assert!( + modified_zone + .disposition + .after + .is_ready_for_cleanup(), + ); + } } } } - parent = blueprint; } // Everything's up-to-date in Kansas City! 
- let blueprint16 = parent; + let blueprint12 = parent; assert_eq!( - blueprint16 + blueprint12 .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .filter(|(_, z)| is_up_to_date_nexus(z)) .count(), - NEXUS_REDUNDANCY + 1, + NEXUS_REDUNDANCY, + ); + assert_eq!( + blueprint12 + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter(|(_, z)| is_old_nexus(z)) + .count(), + 0, ); - update_collection_from_blueprint(&mut example, &blueprint16); + update_collection_from_blueprint(&mut example, &blueprint12); assert_planning_makes_no_changes( &logctx.log, - &blueprint16, - &input, + &blueprint12, + &example.input, &example.collection, TEST_NAME, ); @@ -7147,7 +7699,7 @@ pub(crate) mod test { .system .to_planning_input_builder() .expect("created PlanningInputBuilder"); - let input = input_builder.build(); + example.input = input_builder.build(); /// Expected number of planner iterations required to converge. /// If incidental planner work changes this value occasionally, @@ -7167,14 +7719,16 @@ pub(crate) mod test { let blueprint = Planner::new_based_on( log.clone(), &parent, - &input, + &example.input, &blueprint_name, &example.collection, PlannerRng::from_seed((TEST_NAME, &blueprint_name)), ) .expect("can't create planner") .plan() - .unwrap_or_else(|_| panic!("can't re-plan after {i} iterations")); + .unwrap_or_else(|err| { + panic!("can't re-plan after {i} iterations: {err}") + }); let BlueprintSource::Planner(report) = &blueprint.source else { panic!("unexpected source: {:?}", blueprint.source); @@ -7188,6 +7742,8 @@ pub(crate) mod test { if summary.total_zones_added() == 0 && summary.total_zones_removed() == 0 && summary.total_zones_modified() == 0 + && summary.before.nexus_generation + == summary.after.nexus_generation { assert!( blueprint @@ -7208,10 +7764,715 @@ pub(crate) mod test { return; } } - parent = blueprint; } panic!("did not converge after {MAX_PLANNING_ITERATIONS} iterations"); } + + struct BlueprintGenerator { + log: Logger, + example: ExampleSystem, + blueprint: Blueprint, + rng: SimRngState, + target_release_generation: Generation, + } + + impl BlueprintGenerator { + fn new( + log: Logger, + example: ExampleSystem, + blueprint: Blueprint, + rng: SimRngState, + ) -> Self { + Self { + log, + example, + blueprint, + rng, + target_release_generation: Generation::new(), + } + } + + fn create_image_at_version( + version: &ArtifactVersion, + ) -> BlueprintZoneImageSource { + let fake_hash = ArtifactHash([0; 32]); + BlueprintZoneImageSource::Artifact { + version: BlueprintArtifactVersion::Available { + version: version.clone(), + }, + hash: fake_hash, + } + } + + // - Bumps the target_release_generation + // - Sets a new "tuf_repo" as part of the "example.input" + // - Sets artifacts in the repo to `artifacts` + fn set_new_tuf_repo_with_artifacts( + &mut self, + artifacts: Vec, + system_version: Version, + ) { + let mut input_builder = self.example.input.clone().into_builder(); + let fake_hash = ArtifactHash([0; 32]); + self.target_release_generation = + self.target_release_generation.next(); + + let tuf_repo = TufRepoPolicy { + target_release_generation: self.target_release_generation, + description: TargetReleaseDescription::TufRepo( + TufRepoDescription { + repo: TufRepoMeta { + hash: fake_hash, + targets_role_version: 0, + valid_until: Utc::now(), + system_version, + file_name: String::from(""), + }, + artifacts, + }, + ), + }; + + input_builder.policy_mut().tuf_repo = tuf_repo; + self.example.input = input_builder.build(); + } + + // Sets the old tuf repo 
to the current target repo + fn set_old_tuf_repo_to_target(&mut self) { + let mut input_builder = self.example.input.clone().into_builder(); + input_builder.policy_mut().old_repo = + self.example.input.tuf_repo().clone(); + self.example.input = input_builder.build(); + } + + // Plans a new blueprint, validates it, and returns it + // + // Does not set the current blueprint to this new value + #[track_caller] + fn plan_new_blueprint(&mut self, name: &str) -> Blueprint { + let planner = Planner::new_based_on( + self.log.clone(), + &self.blueprint, + &self.example.input, + name, + &self.example.collection, + self.rng.next_planner_rng(), + ) + .expect("can't create planner"); + let bp = planner.plan().expect("planning succeeded"); + verify_blueprint(&bp); + bp + } + + // Creates a new blueprint builder for manually editing a blueprint + fn new_blueprint_builder( + &mut self, + name: &str, + ) -> BlueprintBuilder<'_> { + BlueprintBuilder::new_based_on( + &self.log, + &self.blueprint, + &self.example.input, + &self.example.collection, + name, + self.rng.next_planner_rng(), + ) + .expect("can't create blueprint builder") + } + + // Asserts that a new blueprint, if generated, will make no changes + #[track_caller] + fn assert_child_bp_makes_no_changes( + &self, + child_blueprint: &Blueprint, + ) { + verify_blueprint(&child_blueprint); + let summary = child_blueprint.diff_since_blueprint(&self.blueprint); + assert_eq!( + summary.diff.sleds.added.len(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.diff.sleds.removed.len(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.diff.sleds.modified().count(), + 0, + "{}", + summary.display() + ); + } + + // Asserts that a new blueprint, if generated, will have no report. + // + // This function explicitly ignores the "noop_image_source" report. + // + // NOTE: More reports can be added, but we aren't using + // "PlanningReport::is_empty()", because some checks (e.g. + // noop_image_source) are almost always non-empty. 
+ #[track_caller] + fn assert_child_bp_has_no_report(&self, child_blueprint: &Blueprint) { + verify_blueprint(&child_blueprint); + let summary = child_blueprint.diff_since_blueprint(&self.blueprint); + + let BlueprintSource::Planner(report) = &child_blueprint.source + else { + panic!("Child blueprint has no associated report"); + }; + + assert!( + report.expunge.is_empty() + && report.decommission.is_empty() + && report.mgs_updates.is_empty() + && report.add.is_empty() + && report.zone_updates.is_empty() + && report.nexus_generation_bump.is_empty() + && report.cockroachdb_settings.is_empty(), + "Blueprint Summary: {}\n + Planning report is not empty: {}", + summary.display(), + report, + ); + } + + // Updates the input inventory to reflect changes from the blueprint + fn update_inventory_from_blueprint(&mut self) { + update_collection_from_blueprint( + &mut self.example, + &self.blueprint, + ); + } + } + + #[test] + fn test_nexus_generation_assignment_multiple_generations() { + static TEST_NAME: &str = + "test_nexus_generation_assignment_multiple_generations"; + let logctx = test_setup_log(TEST_NAME); + + // Use our example system with multiple Nexus zones + let mut rng = SimRngState::from_seed(TEST_NAME); + let (example, blueprint) = ExampleSystemBuilder::new_with_rng( + &logctx.log, + rng.next_system_rng(), + ) + .nexus_count(3) + .build(); + verify_blueprint(&blueprint); + + let mut bp_generator = BlueprintGenerator::new( + logctx.log.clone(), + example, + blueprint, + rng, + ); + + // We shouldn't try to bump the generation number without a new TUF + // repo. + let new_bp = bp_generator.plan_new_blueprint("no-op"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + bp_generator.assert_child_bp_has_no_report(&new_bp); + + // Set up a TUF repo with new artifacts + let artifact_version_1 = + ArtifactVersion::new_static("1.0.0-nexus-gen-test") + .expect("can't parse artifact version"); + let artifact_version_2 = + ArtifactVersion::new_static("2.0.0-nexus-gen-test") + .expect("can't parse artifact version"); + + // First: Make everything use artifacts from version "1.0.0". + // Treat this as the starting point instead of InstallDataset, + // as this has an ambiguous version, and typically requires + // image resolution to occur first before zones can be added. + // + // Next: Set the target to "2.0.0", upgrade everything except + // Nexus. + bp_generator.set_new_tuf_repo_with_artifacts( + create_artifacts_at_version(&artifact_version_1), + Version::new(1, 0, 0), + ); + bp_generator.set_old_tuf_repo_to_target(); + bp_generator.set_new_tuf_repo_with_artifacts( + create_artifacts_at_version(&artifact_version_2), + Version::new(2, 0, 0), + ); + let image_source_1 = + BlueprintGenerator::create_image_at_version(&artifact_version_1); + let image_source_2 = + BlueprintGenerator::create_image_at_version(&artifact_version_2); + assert_ne!(image_source_1, image_source_2); + + // Manually update all non-Nexus zones to the new image source + // + // Manually make the Nexus zones all become "1.0.0" so they're + // not using the install dataset. + for sled_config in bp_generator.blueprint.sleds.values_mut() { + for mut zone in &mut sled_config.zones { + if zone.zone_type.kind() != ZoneKind::Nexus { + zone.image_source = image_source_2.clone(); + } else { + zone.image_source = image_source_1.clone(); + } + } + } + + // Also, manually edit the blueprint to expunge one Nexus. + // + // This tests that we can restore redundancy of the old Nexuses + // while we're also deploying the new ones. 
+ let (sled, zone, _) = bp_generator + .blueprint + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .next() + .unwrap(); + let nexus_id = zone.id; + let mut bp_builder = bp_generator.new_blueprint_builder("expunge-one"); + bp_builder.sled_expunge_zone(sled, nexus_id).unwrap(); + bp_generator.blueprint = bp_builder.build(BlueprintSource::Test); + + // We should have two old Nexuses, both running with the old generation + // and the old image. + let nexuses = bp_generator + .blueprint + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .collect::>(); + assert_eq!(nexuses.len(), 2); + for (_, zone, nexus) in nexuses { + assert_eq!(zone.image_source, image_source_1,); + assert_eq!( + nexus.nexus_generation, + bp_generator.blueprint.nexus_generation + ); + } + + // Plan a new blueprint, which will provision new Nexus zones + // and restore redundancy of the expunged old Nexus zone. + bp_generator.update_inventory_from_blueprint(); + let old_generation = bp_generator.blueprint.nexus_generation; + let new_bp = + bp_generator.plan_new_blueprint("test_blocked_by_new_nexus_db"); + { + assert_eq!(new_bp.nexus_generation, old_generation); + + let summary = new_bp.diff_since_blueprint(&bp_generator.blueprint); + assert_eq!( + summary.total_zones_added(), + bp_generator.example.input.target_nexus_zone_count() + 1 + ); + assert_eq!(summary.total_zones_removed(), 0); + assert_eq!(summary.total_zones_modified(), 1); + + let BlueprintSource::Planner(report) = &new_bp.source else { + panic!("blueprint has no associated report"); + }; + assert!( + matches!( + report.nexus_generation_bump, + PlanningNexusGenerationBumpReport::WaitingOn( + NexusGenerationBumpWaitingOn::MissingNexusDatabaseAccessRecords + ), + ), + "Unexpected Nexus Generation report: {:?}", + report.nexus_generation_bump + ); + } + bp_generator.blueprint = new_bp; + + // We should now have 6 nexuses: 3 with the old image using the old + // generation, and 3 with the new image using the new generation. + let nexuses = bp_generator + .blueprint + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .collect::>(); + assert_eq!(nexuses.len(), 6); + for (_, zone, nexus) in nexuses { + let bp = &bp_generator.blueprint; + + // Old Nexuses + if nexus.nexus_generation == bp.nexus_generation { + assert_eq!(zone.image_source, image_source_1,); + // New Nexuses + } else if nexus.nexus_generation == bp.nexus_generation.next() { + assert_eq!(zone.image_source, image_source_2,); + } else { + panic!("Unexpected nexus generation"); + } + } + + logctx.cleanup_successful(); + } + + #[test] + fn test_nexus_generation_update() { + static TEST_NAME: &str = "test_nexus_generation_update"; + let logctx = test_setup_log(TEST_NAME); + + // Use our example system with multiple Nexus zones + let mut rng = SimRngState::from_seed(TEST_NAME); + let (example, blueprint) = ExampleSystemBuilder::new_with_rng( + &logctx.log, + rng.next_system_rng(), + ) + .nexus_count(3) + .build(); + verify_blueprint(&blueprint); + + let mut bp_generator = BlueprintGenerator::new( + logctx.log.clone(), + example, + blueprint, + rng, + ); + + // We shouldn't try to bump the generation number without a new TUF + // repo.
+ let new_bp = bp_generator.plan_new_blueprint("no-op"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + bp_generator.assert_child_bp_has_no_report(&new_bp); + + // Initially, all zones should be sourced from the install dataset + assert!( + bp_generator + .blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .all(|(_, z)| matches!( + z.image_source, + BlueprintZoneImageSource::InstallDataset + )) + ); + + // Set up a TUF repo with new artifacts + let artifact_version_1 = + ArtifactVersion::new_static("1.0.0-nexus-gen-test") + .expect("can't parse artifact version"); + let artifact_version_2 = + ArtifactVersion::new_static("2.0.0-nexus-gen-test") + .expect("can't parse artifact version"); + bp_generator.set_new_tuf_repo_with_artifacts( + create_artifacts_at_version(&artifact_version_1), + Version::new(1, 0, 0), + ); + bp_generator.set_old_tuf_repo_to_target(); + bp_generator.set_new_tuf_repo_with_artifacts( + create_artifacts_at_version(&artifact_version_2), + Version::new(2, 0, 0), + ); + let image_source_1 = + BlueprintGenerator::create_image_at_version(&artifact_version_1); + let image_source_2 = + BlueprintGenerator::create_image_at_version(&artifact_version_2); + + // Check: Initially, nexus generation update should be blocked because + // non-Nexus zones haven't been updated yet + { + let new_bp = + bp_generator.plan_new_blueprint("test_blocked_by_non_nexus"); + + let BlueprintSource::Planner(report) = &new_bp.source else { + panic!("blueprint has no associated report"); + }; + // The blueprint should have a report showing what's blocked + assert!( + matches!( + report.nexus_generation_bump, + PlanningNexusGenerationBumpReport::WaitingOn( + NexusGenerationBumpWaitingOn::FoundOldNonNexusZones + ), + ), + "Unexpected Nexus Generation report: {:?}", + report.nexus_generation_bump + ); + } + + // Manually update all non-Nexus zones to the new image source + // + // Manually make the Nexus zones all become "1.0.0" so they're + // not using the install dataset. + for sled_config in bp_generator.blueprint.sleds.values_mut() { + for mut zone in &mut sled_config.zones { + if zone.zone_type.kind() != ZoneKind::Nexus { + zone.image_source = image_source_2.clone(); + } else { + zone.image_source = image_source_1.clone(); + } + } + } + bp_generator.update_inventory_from_blueprint(); + + // Check: Now nexus generation update should be blocked by lack of new + // Nexus zones. + // + // When we create the new blueprint, we'll plan for the new zones, but: + // ✘ The new Nexus Zones are missing db records (we see this first) + // ✘ The new Nexus Zones aren't in inventory. + let old_generation = bp_generator.blueprint.nexus_generation; + let new_generation = old_generation.next(); + let new_bp = + bp_generator.plan_new_blueprint("test_blocked_by_new_nexus_db"); + { + assert_eq!(new_bp.nexus_generation, old_generation); + + let summary = new_bp.diff_since_blueprint(&bp_generator.blueprint); + + // This new blueprint does do *something* - it adds new Nexus zones, + // even though they aren't sufficiently "up" for the nexus + // generation bump. 
+ assert_eq!( + summary.total_zones_added(), + bp_generator.example.input.target_nexus_zone_count() + ); + assert_eq!(summary.total_zones_removed(), 0); + assert_eq!(summary.total_zones_modified(), 0); + let BlueprintSource::Planner(report) = &new_bp.source else { + panic!("blueprint has no associated report"); + }; + assert!( + matches!( + report.nexus_generation_bump, + PlanningNexusGenerationBumpReport::WaitingOn( + NexusGenerationBumpWaitingOn::MissingNexusDatabaseAccessRecords + ), + ), + "Unexpected Nexus Generation report: {:?}", + report.nexus_generation_bump + ); + } + bp_generator.blueprint = new_bp; + + // "Propagate the DB records to the database", by creating records for them. + let new_nexus_zones: BTreeSet<_> = bp_generator + .blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_, zone)| { + match &zone.zone_type { + BlueprintZoneType::Nexus(nexus_zone) => { + if nexus_zone.nexus_generation == new_generation { + return Some(zone.id); + } + } + _ => (), + } + None + }) + .collect(); + let mut input = std::mem::replace( + &mut bp_generator.example.input, + nexus_types::deployment::PlanningInputBuilder::empty_input(), + ) + .into_builder(); + input.set_not_yet_nexus_zones(new_nexus_zones.clone()); + bp_generator.example.input = input.build(); + + // Check: If we try generating a new blueprint, we're still stuck behind + // propagation to inventory. + // + // ✔ The new Nexus Zones have db records + // ✘ The new Nexus Zones aren't in inventory. + // + // We'll refuse to bump the top-level generation number (which would + // begin quiescing old Nexuses) until we've seen that the new nexus + // zones are up. + { + let new_bp = + bp_generator.plan_new_blueprint("wait_for_propagation"); + assert_eq!(new_bp.nexus_generation, old_generation); + + let summary = new_bp.diff_since_blueprint(&bp_generator.blueprint); + assert_eq!(summary.total_zones_added(), 0); + assert_eq!(summary.total_zones_removed(), 0); + assert_eq!(summary.total_zones_modified(), 0); + let BlueprintSource::Planner(report) = &new_bp.source else { + panic!("blueprint has no associated report"); + }; + assert!( + matches!( + report.nexus_generation_bump, + PlanningNexusGenerationBumpReport::WaitingOn( + NexusGenerationBumpWaitingOn::MissingNewNexusInInventory + ), + ), + "Unexpected Nexus Generation report: {:?}", + report.nexus_generation_bump + ); + } + + // Make the new Nexus zones appear in inventory + bp_generator.update_inventory_from_blueprint(); + + // If the Nexus zones appear in inventory, but we're missing db records + // for any of these new zones, we shouldn't proceed. + // + // ✘ The new Nexus Zones are missing db records + // ✔ The new Nexus Zones are in inventory.
+ let mut input = std::mem::replace( + &mut bp_generator.example.input, + nexus_types::deployment::PlanningInputBuilder::empty_input(), + ) + .into_builder(); + input.set_not_yet_nexus_zones(BTreeSet::new()); + bp_generator.example.input = input.build(); + + { + let new_bp = + bp_generator.plan_new_blueprint("test_blocked_by_db_records"); + assert_eq!(new_bp.nexus_generation, old_generation); + + let summary = new_bp.diff_since_blueprint(&bp_generator.blueprint); + assert!(!summary.has_changes()); + let BlueprintSource::Planner(report) = &new_bp.source else { + panic!("blueprint has no associated report"); + }; + assert!( + matches!( + report.nexus_generation_bump, + PlanningNexusGenerationBumpReport::WaitingOn( + NexusGenerationBumpWaitingOn::MissingNexusDatabaseAccessRecords + ), + ), + "Unexpected Nexus Generation report: {:?}", + report.nexus_generation_bump + ); + } + let mut input = std::mem::replace( + &mut bp_generator.example.input, + nexus_types::deployment::PlanningInputBuilder::empty_input(), + ) + .into_builder(); + input.set_not_yet_nexus_zones(new_nexus_zones); + bp_generator.example.input = input.build(); + + // Check: Now nexus generation update should succeed. + // + // Finally: + // ✔ The new Nexus Zones have db records + // ✔ The new Nexus Zones are in inventory. + let new_bp = bp_generator.plan_new_blueprint("update_generation"); + let BlueprintSource::Planner(report) = &new_bp.source else { + panic!("blueprint has no associated report"); + }; + // Finally, the top-level Nexus generation should get bumped. + let PlanningNexusGenerationBumpReport::BumpingGeneration( + observed_next_gen, + ) = &report.nexus_generation_bump + else { + panic!( + "Unexpected nexus generation report: {:?}", + report.nexus_generation_bump, + ); + }; + + assert_eq!(new_bp.nexus_generation, new_generation); + assert_eq!(*observed_next_gen, new_generation); + bp_generator.blueprint = new_bp; + + // After the nexus generation bump, if we're still running from an old + // Nexus, we should refuse to expunge ourselves. + bp_generator.update_inventory_from_blueprint(); + update_input_with_nexus_at_generation( + &mut bp_generator.example, + &bp_generator.blueprint, + old_generation, + ); + let new_bp = bp_generator.plan_new_blueprint("dont-expunge-yet"); + let BlueprintSource::Planner(report) = &new_bp.source else { + panic!("blueprint has no associated report"); + }; + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + // We should be able to see all three old Nexus zones refusing to shut + // down in the planning report. + let waiting_zones = &report.zone_updates.waiting_zones; + assert_eq!( + waiting_zones.len(), + 3, + "Unexpected zone update report: {:#?}", + report.zone_updates + ); + for why in waiting_zones.values() { + assert_eq!( + why, + &ZoneWaitingToExpunge::Nexus { + zone_generation: old_generation, + }, + "Unexpected waiting zones report: {:#?}", + waiting_zones, + ); + } + + // Update our input to run from the new Nexuses. + // + // After the generation bump, we should begin expunging the old Nexus + // zones. 
+ bp_generator.update_inventory_from_blueprint(); + // Old Nexuses which are in-service + let mut old_nexuses = + bp_generator.example.input.target_nexus_zone_count(); + // Old Nexuses which were expunged, but which still need propagation + let mut expunging_nexuses = 0; + + while old_nexuses > 0 || expunging_nexuses > 0 { + let new_bp = bp_generator.plan_new_blueprint("removal"); + + // We expect to expunge one old nexus at a time, if any exist, and + // also to finalize the expungement of old nexuses that were removed + // in prior iterations. + let expected_modified_nexuses = + expunging_nexuses + if old_nexuses > 0 { 1 } else { 0 }; + + { + let summary = + new_bp.diff_since_blueprint(&bp_generator.blueprint); + assert_eq!( + summary.total_zones_added(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.total_zones_removed(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.total_zones_modified(), + expected_modified_nexuses, + "{}", + summary.display() + ); + } + if old_nexuses > 0 { + old_nexuses -= 1; + expunging_nexuses = 1; + } else { + expunging_nexuses = 0; + } + + bp_generator.blueprint = new_bp; + bp_generator.update_inventory_from_blueprint(); + } + + let new_bp = bp_generator.plan_new_blueprint("no-op"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + bp_generator.assert_child_bp_has_no_report(&new_bp); + + // Check: If the "old TUF repo = new TUF repo", we'll still make no changes + bp_generator.set_old_tuf_repo_to_target(); + let new_bp = bp_generator.plan_new_blueprint("repo-update"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + bp_generator.assert_child_bp_has_no_report(&new_bp); + + // After all this, the Nexus generation number has still been updated + // exactly once. + assert_eq!(new_bp.nexus_generation, new_generation); + + logctx.cleanup_successful(); + } } diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 1fb8815ebb7..343f91b8169 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -56,6 +56,7 @@ use serde::Deserialize; use serde::Serialize; use slog::Key; use std::collections::BTreeMap; +use std::collections::BTreeSet; use std::fmt; use std::net::Ipv6Addr; use std::net::SocketAddrV6; @@ -116,12 +117,14 @@ pub use planning_input::TufRepoContentsError; pub use planning_input::TufRepoPolicy; pub use planning_input::ZpoolFilter; pub use planning_report::CockroachdbUnsafeToShutdown; +pub use planning_report::NexusGenerationBumpWaitingOn; pub use planning_report::PlanningAddStepReport; pub use planning_report::PlanningCockroachdbSettingsStepReport; pub use planning_report::PlanningDecommissionStepReport; pub use planning_report::PlanningExpungeStepReport; pub use planning_report::PlanningMgsUpdatesStepReport; pub use planning_report::PlanningMupdateOverrideStepReport; +pub use planning_report::PlanningNexusGenerationBumpReport; pub use planning_report::PlanningNoopImageSourceSkipSledHostPhase2Reason; pub use planning_report::PlanningNoopImageSourceSkipSledZonesReason; pub use planning_report::PlanningNoopImageSourceSkipZoneReason; @@ -131,6 +134,7 @@ pub use planning_report::PlanningZoneUpdatesStepReport; pub use planning_report::ZoneAddWaitingOn; pub use planning_report::ZoneUnsafeToShutdown; pub use planning_report::ZoneUpdatesWaitingOn; +pub use planning_report::ZoneWaitingToExpunge; pub use reconfigurator_config::PlannerConfig; pub use reconfigurator_config::PlannerConfigDiff; pub use reconfigurator_config::PlannerConfigDisplay; @@ -425,6 +429,35 @@ impl 
Blueprint { Ok(zone_config.nexus_generation < self.nexus_generation) } + + /// Given a set of Nexus zone UUIDs, returns the "nexus generation" + /// of these zones in the blueprint. + /// + /// Returns [`Option::None`] if none of these zones are found. + /// + /// Returns an error if there are multiple distinct generations for these + /// zones. + pub fn find_generation_for_nexus( + &self, + nexus_zones: &BTreeSet, + ) -> Result, anyhow::Error> { + let mut gen = None; + for (_, zone, nexus_zone) in + self.all_nexus_zones(BlueprintZoneDisposition::is_in_service) + { + if nexus_zones.contains(&zone.id) { + let found_gen = nexus_zone.nexus_generation; + if let Some(gen) = gen { + if found_gen != gen { + bail!("Multiple generations found for these zones"); + } + } + gen = Some(found_gen); + } + } + + Ok(gen) + } } /// Description of the source of a blueprint. diff --git a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index 90057685774..8880fd65847 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -13,6 +13,7 @@ use super::PlannerConfig; use daft::Diffable; use indent_write::fmt::IndentWriter; +use omicron_common::api::external::Generation; use omicron_common::policy::COCKROACHDB_REDUNDANCY; use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::MupdateOverrideUuid; @@ -63,6 +64,7 @@ pub struct PlanningReport { pub mgs_updates: PlanningMgsUpdatesStepReport, pub add: PlanningAddStepReport, pub zone_updates: PlanningZoneUpdatesStepReport, + pub nexus_generation_bump: PlanningNexusGenerationBumpReport, pub cockroachdb_settings: PlanningCockroachdbSettingsStepReport, } @@ -79,6 +81,7 @@ impl PlanningReport { ), add: PlanningAddStepReport::new(), zone_updates: PlanningZoneUpdatesStepReport::new(), + nexus_generation_bump: PlanningNexusGenerationBumpReport::new(), cockroachdb_settings: PlanningCockroachdbSettingsStepReport::new(), } } @@ -90,6 +93,7 @@ impl PlanningReport { && self.mgs_updates.is_empty() && self.add.is_empty() && self.zone_updates.is_empty() + && self.nexus_generation_bump.is_empty() && self.cockroachdb_settings.is_empty() } } @@ -112,6 +116,7 @@ impl fmt::Display for PlanningReport { mgs_updates, add, zone_updates, + nexus_generation_bump, cockroachdb_settings, } = self; writeln!(f, "planning report for blueprint {blueprint_id}:")?; @@ -124,6 +129,7 @@ impl fmt::Display for PlanningReport { mgs_updates.fmt(f)?; add.fmt(f)?; zone_updates.fmt(f)?; + nexus_generation_bump.fmt(f)?; cockroachdb_settings.fmt(f)?; } Ok(()) @@ -838,6 +844,7 @@ pub struct PlanningZoneUpdatesStepReport { pub expunged_zones: BTreeMap>, pub updated_zones: BTreeMap>, pub unsafe_zones: BTreeMap, + pub waiting_zones: BTreeMap, } impl PlanningZoneUpdatesStepReport { @@ -848,6 +855,7 @@ impl PlanningZoneUpdatesStepReport { expunged_zones: BTreeMap::new(), updated_zones: BTreeMap::new(), unsafe_zones: BTreeMap::new(), + waiting_zones: BTreeMap::new(), } } @@ -863,6 +871,7 @@ impl PlanningZoneUpdatesStepReport { && self.expunged_zones.is_empty() && self.updated_zones.is_empty() && self.unsafe_zones.is_empty() + && self.waiting_zones.is_empty() } pub fn out_of_date_zone( @@ -910,6 +919,14 @@ impl PlanningZoneUpdatesStepReport { ) { self.unsafe_zones.insert(zone.clone(), reason); } + + pub fn waiting_zone( + &mut self, + zone: &BlueprintZoneConfig, + reason: ZoneWaitingToExpunge, + ) { + self.waiting_zones.insert(zone.clone(), reason); + } } impl fmt::Display for PlanningZoneUpdatesStepReport { @@ 
-920,6 +937,7 @@ impl fmt::Display for PlanningZoneUpdatesStepReport { expunged_zones, updated_zones, unsafe_zones, + waiting_zones, } = self; if let Some(waiting_on) = waiting_on { @@ -977,6 +995,20 @@ impl fmt::Display for PlanningZoneUpdatesStepReport { } } + if !waiting_zones.is_empty() { + let (n, s) = plural_map(waiting_zones); + writeln!(f, "* {n} zone{s} waiting to be expunged:")?; + for (zone, reason) in waiting_zones.iter() { + writeln!( + f, + " * zone {} ({}): {}", + zone.id, + zone.zone_type.kind().report_str(), + reason, + )?; + } + } + Ok(()) } } @@ -1008,6 +1040,8 @@ impl ZoneUpdatesWaitingOn { } } +/// Zones which should not be shut down, because their lack of availability +/// could be problematic for the successful functioning of the deployed system. #[derive( Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, )] @@ -1034,6 +1068,124 @@ impl fmt::Display for ZoneUnsafeToShutdown { } } +/// Out-of-date zones which are not yet ready to be expunged. +/// +/// For example, out-of-date Nexus zones should not be expunged until +/// handoff has completed. +#[derive( + Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, +)] +#[serde(rename_all = "snake_case", tag = "type")] +pub enum ZoneWaitingToExpunge { + Nexus { zone_generation: Generation }, +} + +impl fmt::Display for ZoneWaitingToExpunge { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Nexus { zone_generation } => { + write!( + f, + "image out-of-date, but zone's nexus_generation \ + {zone_generation} is still active" + ) + } + } + } +} + +#[derive( + Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, +)] +#[serde(tag = "component", rename_all = "snake_case", content = "value")] +pub enum PlanningNexusGenerationBumpReport { + /// We have no reason to bump the Nexus generation number. + NothingToReport, + + /// We are waiting on some condition before we can bump the + /// Nexus generation. + WaitingOn(NexusGenerationBumpWaitingOn), + + /// We are bumping the Nexus generation number to this value. 
+ BumpingGeneration(Generation), +} + +impl PlanningNexusGenerationBumpReport { + pub fn new() -> Self { + Self::NothingToReport + } + + pub fn is_empty(&self) -> bool { + matches!(self, Self::NothingToReport) + } + + pub fn set_waiting_on(&mut self, why: NexusGenerationBumpWaitingOn) { + *self = Self::WaitingOn(why); + } + + pub fn set_next_generation(&mut self, next_generation: Generation) { + *self = Self::BumpingGeneration(next_generation); + } +} + +impl fmt::Display for PlanningNexusGenerationBumpReport { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::WaitingOn(why) => { + writeln!( + f, + "* waiting to update top-level nexus_generation: {}", + why.as_str() + )?; + } + Self::BumpingGeneration(gen) => { + writeln!(f, "* updating top-level nexus_generation to: {gen}")?; + } + // Nothing to report + Self::NothingToReport => (), + } + Ok(()) + } +} + +#[derive( + Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, +)] +#[serde(rename_all = "snake_case", tag = "type")] +pub enum NexusGenerationBumpWaitingOn { + /// Waiting for the planner to finish updating all non-Nexus zones + FoundOldNonNexusZones, + + /// Waiting for the planner to deploy new-generation Nexus zones + MissingNewNexusInBlueprint, + + /// Waiting for `db_metadata_nexus` records to be deployed for + /// new-generation Nexus zones + MissingNexusDatabaseAccessRecords, + + /// Waiting for newly deployed Nexus zones to appear to inventory + MissingNewNexusInInventory, +} + +impl NexusGenerationBumpWaitingOn { + pub fn as_str(&self) -> &'static str { + match self { + Self::FoundOldNonNexusZones => { + "some non-Nexus zones are not yet updated" + } + Self::MissingNewNexusInBlueprint => { + "new Nexus zones have not been planned yet" + } + Self::MissingNexusDatabaseAccessRecords => { + "new Nexus zones do not have database records yet" + } + Self::MissingNewNexusInInventory => { + "new Nexus zones are not in inventory yet" + } + } + } +} + +#[derive( + Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, +)] diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 64cfc253c10..8c8f8957d77 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -3107,6 +3107,9 @@ "mgs_updates": { "$ref": "#/components/schemas/PlanningMgsUpdatesStepReport" }, + "nexus_generation_bump": { + "$ref": "#/components/schemas/PlanningNexusGenerationBumpReport" + }, "noop_image_source": { "$ref": "#/components/schemas/PlanningNoopImageSourceStepReport" }, @@ -3135,6 +3138,7 @@ "decommission", "expunge", "mgs_updates", + "nexus_generation_bump", "noop_image_source", "planner_config", "source", @@ -6030,6 +6034,70 @@ "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id.
Password hashes may be rejected if the parameters appear not to be secure enough.", "type": "string" }, + "NexusGenerationBumpWaitingOn": { + "oneOf": [ + { + "description": "Waiting for the planner to finish updating all non-Nexus zones", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "found_old_non_nexus_zones" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for the planner to deploy new-generation Nexus zones", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_new_nexus_in_blueprint" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for `db_metadata_nexus` records to be deployed for new-generation Nexus zones", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_nexus_database_access_records" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for newly deployed Nexus zones to appear to inventory", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_new_nexus_in_inventory" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "NodeName": { "description": "Unique name for a saga [`Node`]\n\nEach node requires a string name that's unique within its DAG. The name is used to identify its output. Nodes that depend on a given node (either directly or indirectly) can access the node's output using its name.", "type": "string" @@ -7118,6 +7186,63 @@ "pending_mgs_updates" ] }, + "PlanningNexusGenerationBumpReport": { + "oneOf": [ + { + "description": "We have no reason to bump the Nexus generation number.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "nothing_to_report" + ] + } + }, + "required": [ + "component" + ] + }, + { + "description": "We are waiting on some condition before we can bump the Nexus generation.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "waiting_on" + ] + }, + "value": { + "$ref": "#/components/schemas/NexusGenerationBumpWaitingOn" + } + }, + "required": [ + "component", + "value" + ] + }, + { + "description": "We are bumping the Nexus generation number to this value.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "bumping_generation" + ] + }, + "value": { + "$ref": "#/components/schemas/Generation" + } + }, + "required": [ + "component", + "value" + ] + } + ] + }, "PlanningNoopImageSourceConverted": { "description": "How many of the total install-dataset zones and/or host phase 2 slots were noop-converted to use the artifact store on a particular sled.", "type": "object", @@ -7430,13 +7555,20 @@ "$ref": "#/components/schemas/ZoneUpdatesWaitingOn" } ] + }, + "waiting_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ZoneWaitingToExpunge" + } } }, "required": [ "expunged_zones", "out_of_date_zones", "unsafe_zones", - "updated_zones" + "updated_zones", + "waiting_zones" ] }, "PortConfigV2": { @@ -9732,6 +9864,7 @@ ] }, "ZoneUnsafeToShutdown": { + "description": "Zones which should not be shut down, because their lack of availability could be problematic for the successful functioning of the deployed system.", "oneOf": [ { "type": "object", @@ -9854,6 +9987,29 @@ } ] }, + "ZoneWaitingToExpunge": { + "description": "Out-of-date zones which are not yet ready to be expunged.\n\nFor example, out-of-date Nexus zones should not be expunged until handoff has completed.", 
+ "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "nexus" + ] + }, + "zone_generation": { + "$ref": "#/components/schemas/Generation" + } + }, + "required": [ + "type", + "zone_generation" + ] + } + ] + }, "ZpoolName": { "title": "The name of a Zpool", "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", From 2cb7a2c44deb3c71c7573f920d2ec35ed6b3e627 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 23 Sep 2025 00:46:14 -0700 Subject: [PATCH 04/18] [5/n] [reconfigurator-planning] gate adds/updates on zone image sources being known (#8921) Expand the set of gates for adds/updates to include the fact that zone image sources should be known. Add tests for this: * `cmds-mupdate-update-flow` contains the bulk of testing for this scenario. * I had to make tweaks to some tests, particularly to `cmds-target-release.txt`, in order to start running the test in earnest from the Artifact state rather than the InstallDataset state. --- .../tests/input/cmds-add-sled-no-disks.txt | 4 + .../cmds-expunge-newly-added-external-dns.txt | 5 + .../cmds-expunge-newly-added-internal-dns.txt | 4 + .../tests/input/cmds-mupdate-update-flow.txt | 16 +- .../tests/input/cmds-target-release.txt | 41 +- .../output/cmds-add-sled-no-disks-stdout | 31 +- .../tests/output/cmds-example-stdout | 19 +- ...ds-expunge-newly-added-external-dns-stdout | 30 +- ...ds-expunge-newly-added-internal-dns-stdout | 19 +- .../output/cmds-mupdate-update-flow-stdout | 339 +- .../output/cmds-noop-image-source-stdout | 13 + .../tests/output/cmds-target-release-stdout | 3365 +++++++++++------ .../planning/src/blueprint_builder/builder.rs | 4 +- nexus/reconfigurator/planning/src/planner.rs | 615 ++- .../planner_decommissions_sleds_bp2.txt | 9 + .../output/planner_nonprovisionable_bp2.txt | 10 + .../app/background/tasks/blueprint_planner.rs | 9 +- nexus/types/src/deployment.rs | 10 + 18 files changed, 3155 insertions(+), 1388 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt index 971b27eb2bc..2f1d99e8411 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt @@ -12,6 +12,10 @@ sled-add --ndisks 0 # Generate a new inventory collection that includes that sled. inventory-generate +# Set the add_zones_with_mupdate_override planner config to ensure that zone +# adds happen despite zone image sources not being Artifact. +set planner-config --add-zones-with-mupdate-override true + # Try to plan a new blueprint; this should be okay even though the sled # we added has no disks. 
blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-external-dns.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-external-dns.txt index 6ac544f14b1..744538ab910 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-external-dns.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-external-dns.txt @@ -9,6 +9,11 @@ blueprint-edit 3f00b694-1b16-4aaa-8f78-e6b3a527b434 expunge-zones 8429c772-07e8- blueprint-diff 3f00b694-1b16-4aaa-8f78-e6b3a527b434 366b0b68-d80e-4bc1-abd3-dc69837847e0 blueprint-show 366b0b68-d80e-4bc1-abd3-dc69837847e0 + +# Set the add_zones_with_mupdate_override planner config to ensure that zone +# adds happen despite zone image sources not being Artifact. +set planner-config --add-zones-with-mupdate-override true + # blueprint-plan will place a new external DNS zone, diff DNS to see the new zone has `ns` and NS records. blueprint-plan 366b0b68-d80e-4bc1-abd3-dc69837847e0 blueprint-diff 366b0b68-d80e-4bc1-abd3-dc69837847e0 9c998c1d-1a7b-440a-ae0c-40f781dea6e2 diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt index 9d2cbb99d2f..ab5dfe0f998 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt @@ -12,6 +12,10 @@ blueprint-diff dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 8da82a8e-bf97-4fbd-8ddd-9f64 blueprint-edit 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 mark-for-cleanup 99e2f30b-3174-40bf-a78a-90da8abba8ca blueprint-diff 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 58d5e830-0884-47d8-a7cd-b2b3751adeb4 +# Set the add_zones_with_mupdate_override planner config to ensure that zone +# adds happen despite zone image sources not being Artifact. +set planner-config --add-zones-with-mupdate-override true + # Planning a new blueprint will now replace the expunged zone, with new records for its replacement. blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint-diff 58d5e830-0884-47d8-a7cd-b2b3751adeb4 af934083-59b5-4bf6-8966-6fb5292c29e1 diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt index 37afbd2471f..097cc3c36f0 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt @@ -104,9 +104,6 @@ sled-set serial2 mupdate-override unset # Run the planner again. This will cause sled 2's blueprint # remove_mupdate_override to be unset. But no further planning steps will # happen because the target release generation is not new enough. -# -# TODO: we want to block further planning steps until the target release is -# uploaded and all install-dataset zones have been converted to artifact ones. inventory-generate blueprint-plan latest latest blueprint-show latest @@ -119,10 +116,19 @@ blueprint-plan latest latest blueprint-show latest blueprint-diff latest -# Now clear the mupdate override error. At this point, the rest of the -# planner starts working. +# Now clear the mupdate override error. At this point, we're *still* blocked +# on serial1's install dataset not being known (so cannot be noop converted). 
sled-set serial1 mupdate-override unset inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Update serial1's install dataset. Finally the planner is unblocked. (Note that +# we do noop conversions of serial1 to Artifact and the unblocking of further +# steps within the same blueprint. In other words, the code that considers +# whether add/update are blocked takes into account noop conversions.) +sled-update-install-dataset serial1 --to-target-release +inventory-generate # This will attempt to update the RoT bootloader on the first sled. blueprint-plan latest latest diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt index 8c10f204883..3d0d1d0c382 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt @@ -7,6 +7,42 @@ show # Create a TUF repository from a fake manifest. (The output TUF repo is # written to a temporary directory that this invocation of `reconfigurator-cli` # is running out of as its working directory.) +# +# This is used to simulate the initial version of the system. +tuf-assemble ../../update-common/manifests/fake-0.0.1.toml + +# Load the target release from the assembled TUF repository. +set target-release repo-0.0.1.zip + +# Print the default target release. +show + +# Update the install dataset on all sleds to the target release. +# This will cause zones to be noop converted over to Artifact, +# unblocking upgrades. +sled-update-install-dataset serial0 --to-target-release +sled-update-install-dataset serial1 --to-target-release +sled-update-install-dataset serial2 --to-target-release + +# Generate inventory, then do a planning run to ensure that all zones +# are set to Artifact. +inventory-generate +blueprint-plan latest latest +blueprint-diff latest +# The above blueprint includes a pending MGS update, which we should delete +# (we want to start from a fresh state). +blueprint-edit latest delete-sp-update serial0 +# Also set the Omicron config for all sleds to reflect the +# corresponding image sources. +sled-set serial0 omicron-config latest +sled-set serial1 omicron-config latest +sled-set serial2 omicron-config latest +# Generate inventory once more to reflect the omicron config changes. +inventory-generate +inventory-show latest + +# Setup is now done -- create another TUF repository which will act as the +# target release being updated to. tuf-assemble ../../update-common/manifests/fake.toml # Load the target release from the assembled TUF repository. @@ -17,14 +53,13 @@ show # Test that this state survives a save/load operation. save saved.out -wipe all +wipe system # This should NOT show the target release. show -load saved.out +load saved.out 61f451b3-2121-4ed6-91c7-a550054f6c21 # This should show the target release. show - # Great. Now, let's run through an upgrade! # First, print out what we've got. 
sled-list diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout index be584cb4d6f..167705a3cdf 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout @@ -33,16 +33,34 @@ added sled 00320471-945d-413c-85e7-03e091a70b3c (serial: serial3) generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds +> # Set the add_zones_with_mupdate_override planner config to ensure that zone +> # adds happen despite zone image sources not being Artifact. +> set planner-config --add-zones-with-mupdate-override true +planner config updated: +* add zones with mupdate override: false -> true + + + > # Try to plan a new blueprint; this should be okay even though the sled > # we added has no disks. > blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 INFO skipping noop image source check for all sleds, reason: no target release is currently set -WARN cannot issue more MGS-driven updates (no current artifacts) generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 blueprint source: planner with report: planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 9 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 8 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 8 zones + +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * no zpools in service for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c * discretionary zone placement waiting for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c +* zone updates waiting on zone add blockers @@ -279,8 +297,19 @@ parent: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 blueprint source: planner with report: planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 9 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 8 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 8 zones + +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * no zpools in service for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c * discretionary zone placement waiting for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c +* zone updates waiting on zone add blockers diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index ac06e5ba120..aa599ee1139 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -599,10 +599,15 @@ T ENA ID PARENT > blueprint-plan ade5749d-bdf3-4fab-a8ae-00bea01b3a5a INFO skipping noop image source check for all sleds, reason: no target release is currently set -WARN cannot issue more MGS-driven updates (no current artifacts) generated blueprint 
86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a blueprint source: planner with report: -empty planning report for blueprint 86db3308-f817-4626-8838-4085949a6a41. +planning report for blueprint 86db3308-f817-4626-8838-4085949a6a41: +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 89d02b1b-478c-401a-8e28-7a26f74fa41b: 18 zones + +* zone updates waiting on zone add blockers @@ -1839,10 +1844,16 @@ INTERNAL DNS STATUS > # sled to be expunged. > blueprint-plan latest INFO skipping noop image source check for all sleds, reason: no target release is currently set -WARN cannot issue more MGS-driven updates (no current artifacts) generated blueprint 86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a blueprint source: planner with report: -empty planning report for blueprint 86db3308-f817-4626-8838-4085949a6a41. +planning report for blueprint 86db3308-f817-4626-8838-4085949a6a41: +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 2eb69596-f081-4e2d-9425-9994926e0832: 4 zones + - sled 89d02b1b-478c-401a-8e28-7a26f74fa41b: 17 zones + +* zone updates waiting on zone add blockers diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index 89b4f0307f9..4f5c22d684f 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -837,13 +837,31 @@ blueprint source: edited directly with reconfigurator-cli + +> # Set the add_zones_with_mupdate_override planner config to ensure that zone +> # adds happen despite zone image sources not being Artifact. +> set planner-config --add-zones-with-mupdate-override true +planner config updated: +* add zones with mupdate override: false -> true + + + > # blueprint-plan will place a new external DNS zone, diff DNS to see the new zone has `ns` and NS records. 
> blueprint-plan 366b0b68-d80e-4bc1-abd3-dc69837847e0 INFO skipping noop image source check for all sleds, reason: no target release is currently set -WARN cannot issue more MGS-driven updates (no current artifacts) generated blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2 based on parent blueprint 366b0b68-d80e-4bc1-abd3-dc69837847e0 blueprint source: planner with report: planning report for blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a: 15 zones + - sled 9dc50690-f9bf-4520-bf80-051d0f465c2c: 15 zones + - sled a88790de-5962-4871-8686-61c1fd5b7094: 15 zones + +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * discretionary zones placed: * external_dns zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a from source install dataset * zone updates waiting on discretionary zones @@ -1343,6 +1361,16 @@ parent: 366b0b68-d80e-4bc1-abd3-dc69837847e0 blueprint source: planner with report: planning report for blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a: 15 zones + - sled 9dc50690-f9bf-4520-bf80-051d0f465c2c: 15 zones + - sled a88790de-5962-4871-8686-61c1fd5b7094: 15 zones + +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * discretionary zones placed: * external_dns zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a from source install dataset * zone updates waiting on discretionary zones diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index 4972f618ebe..bc8ea7f2ba8 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -645,13 +645,30 @@ external DNS: +> # Set the add_zones_with_mupdate_override planner config to ensure that zone +> # adds happen despite zone image sources not being Artifact. +> set planner-config --add-zones-with-mupdate-override true +planner config updated: +* add zones with mupdate override: false -> true + + + > # Planning a new blueprint will now replace the expunged zone, with new records for its replacement. 
> blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 INFO skipping noop image source check for all sleds, reason: no target release is currently set -WARN cannot issue more MGS-driven updates (no current artifacts) generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint source: planner with report: planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 15 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 15 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 15 zones + +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * discretionary zones placed: * internal_dns zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source install dataset * zone updates waiting on discretionary zones diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 6f14309436f..1a89e3d3aec 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -510,6 +510,11 @@ planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: - current target release generation (2) is lower than minimum required by blueprint (3) - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 6 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 6 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -786,6 +791,11 @@ planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: * zone adds and updates are blocked: - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 6 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 6 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -915,6 +925,10 @@ planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: - current target release generation (3) is lower than minimum required by blueprint (4) - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 6 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1069,9 
+1083,6 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: 1c0ce176-6dc8-4a > # Run the planner again. This will cause sled 2's blueprint > # remove_mupdate_override to be unset. But no further planning steps will > # happen because the target release generation is not new enough. -> # -> # TODO: we want to block further planning steps until the target release is -> # uploaded and all install-dataset zones have been converted to artifact ones. > inventory-generate generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds @@ -1103,6 +1114,10 @@ planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: * zone adds and updates are blocked: - current target release generation (3) is lower than minimum required by blueprint (4) - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 6 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1281,6 +1296,10 @@ planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: * zone adds and updates are blocked: - current target release generation (3) is lower than minimum required by blueprint (4) - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 6 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1424,6 +1443,9 @@ planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: * zone adds waiting on blockers * zone adds and updates are blocked: - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1602,6 +1624,9 @@ planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: * zone adds waiting on blockers * zone adds and updates are blocked: - sleds have mupdate override errors: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1692,18 +1717,83 @@ external DNS: -> # Now clear the mupdate override error. At this point, the rest of the -> # planner starts working. +> # Now clear the mupdate override error. At this point, we're *still* blocked +> # on serial1's install dataset not being known (so cannot be noop converted). 
> sled-set serial1 mupdate-override unset set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: error -> unset > inventory-generate generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 +blueprint source: planner with report: +planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts +* zone adds waiting on blockers +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + +* zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated + + + +> blueprint-diff latest +from: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 +to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: 
"control-plane.oxide.internal" (unchanged) + unchanged names: 39 (records: 53) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # Update serial1's install dataset. Finally the planner is unblocked. (Note that +> # we do noop conversions of serial1 to Artifact and the unblocking of further +> # steps within the same blueprint. In other words, the code that considers +> # whether add/update are blocked takes into account noop conversions.) +> sled-update-install-dataset serial1 --to-target-release +sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: install dataset updated: to target release (system version 2.0.0) + +> inventory-generate +generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds + > # This will attempt to update the RoT bootloader on the first sled. > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 7, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 @@ -1714,24 +1804,25 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO configuring MGS-driven update, artifact_version: 2.0.0, artifact_hash: 8f89bf8bc5f3271650ad72a26fc0d116c910161ca143731473a2b20fb82653cc, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 +generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 blueprint source: planner with report: -planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts +* noop converting 7/7 install-dataset zones to artifact store on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) -* only 
placed 0/2 desired nexus zones +* only placed 0/1 desired nexus zones * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated > blueprint-show latest -blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 -parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 +blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +parent: 8f2d1f39-7c88-4701-aa43-56bf281b28c1 - sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 5) + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 6) host phase 2 contents: ------------------------ @@ -1768,16 +1859,16 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 2.0.0 in service fd00:1122:3344:102::23 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 2.0.0 in service fd00:1122:3344:102::26 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 2.0.0 in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 2.0.0 in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 2.0.0 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 2.0.0 in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 2.0.0 in service fd00:1122:3344:102::22 @@ -1885,7 +1976,7 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 METADATA: created by::::::::::::: reconfigurator-sim created at::::::::::::: - comment:::::::::::::::: update Sled 0 (serial0) RoT bootloader to 2.0.0 + comment:::::::::::::::: update Sled 0 (serial0) RoT bootloader to 2.0.0, sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: performed 7 noop zone image source updates internal DNS version::: 1 external DNS version::: 1 target release min gen: 4 @@ -1900,12 +1991,13 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 blueprint source: planner with report: -planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: * skipping noop zone image source check on sled 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts +* noop converting 7/7 install-dataset zones to artifact store on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) -* only placed 0/2 desired nexus zones +* only placed 0/1 desired nexus zones * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -1913,8 +2005,66 @@ planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: > blueprint-diff latest -from: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 -to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +from: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 5 -> 6): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/clickhouse 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/external_dns 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/internal_dns 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca 09b9cc9b-3426-470b-a7bc-538f82dede03 in service 
none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 2db6b7c1-0f46-4ced-a3ad-48872793360e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 - install dataset in service fd00:1122:3344:102::23 + └─ + artifact: version 2.0.0 +* crucible bd354eef-d8a6-4165-9124-283fb5e46d77 - install dataset in service fd00:1122:3344:102::26 + └─ + artifact: version 2.0.0 +* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 - install dataset in service fd00:1122:3344:102::25 + └─ + artifact: version 2.0.0 +* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a - install dataset in service fd00:1122:3344:102::24 + └─ + artifact: version 2.0.0 +* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca - install dataset in service fd00:1122:3344:1::1 + └─ + artifact: version 2.0.0 +* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 - install dataset in service fd00:1122:3344:102::21 + └─ + artifact: version 2.0.0 +* nexus 466a9f29-62bf-4e63-924a-b9efdb86afec - install dataset in service fd00:1122:3344:102::22 + └─ + artifact: version 2.0.0 + COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -2011,10 +2161,10 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 1 details: active -> B, > # All MGS-based updates complete. 
> inventory-generate -generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds +generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -2025,27 +2175,28 @@ INFO skipping board for MGS-driven update, serial_number: serial0, part_number: INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { 
version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 75b220ba-a0f4-4872-8202-dc7c87f062d0 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ea5b4030-b52f-44b2-8d70-45f15f987d01 (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f55647d4-5500-4ad3-893a-df45bd50d622 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f") }, inv_image_source: InstallDataset } }] -generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: 353b3b65-20f7-48c3-88f7-495bd5d31545 (service), zone_kind: Clickhouse, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("bb2d1ff02d11f72bc9049ae57f27536207519a1859d29f8d7a90ab3b44d56b08") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 466a9f29-62bf-4e63-924a-b9efdb86afec (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 62620961-fc4a-481e-968b-f5acbac0dc63 (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 
6c3ae381-04f7-41ea-b0ac-74db387dbc3a (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 99e2f30b-3174-40bf-a78a-90da8abba8ca (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: bd354eef-d8a6-4165-9124-283fb5e46d77 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 
3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 75b220ba-a0f4-4872-8202-dc7c87f062d0 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ea5b4030-b52f-44b2-8d70-45f15f987d01 (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f55647d4-5500-4ad3-893a-df45bd50d622 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f") }, inv_image_source: InstallDataset } }] +generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 blueprint source: planner with report: -planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 7 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * noop converting host phase 2 slot B to Artifact on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * noop converting host phase 2 slot B to Artifact on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * noop converting host phase 2 slot B to Artifact on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 -* only placed 0/2 desired nexus zones +* only placed 0/1 desired nexus zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated > blueprint-diff latest -from: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 -to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +from: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 5 -> 6): + sled 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 6 -> 7): host phase 2 contents: -------------------------------- @@ -2083,16 +2234,16 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 2.0.0 in service fd00:1122:3344:102::23 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 2.0.0 in service fd00:1122:3344:102::26 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 2.0.0 in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 2.0.0 in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 2.0.0 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 2.0.0 in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 2.0.0 in service fd00:1122:3344:102::22 sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7 -> 8): @@ -2238,7 +2389,7 @@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: unset -> c8fba91 added sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b (serial: serial3) > inventory-generate -generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds +generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds > # This will *not* generate the datasets and internal NTP zone on the new @@ -2257,7 +2408,7 @@ INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupda INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 4, new_generation: 5 -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 7, 
num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: remove_mupdate_override is set in the blueprint (c8fba912-63ae-473a-9115-0495d10fb3bc) INFO performed noop zone image source checks on sled, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, num_total: 0, num_already_artifact: 0, num_eligible: 0, num_ineligible: 0 @@ -2265,23 +2416,27 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 blueprint source: planner with report: -planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 7 zones are already from artifacts * skipping noop zone image source check on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b: all 0 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * zone adds waiting on blockers * zone adds and updates are blocked: - current target release generation (4) is lower than minimum required by blueprint (5) - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 + - sleds have deployment units with image sources not set to Artifact: + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 6 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated > blueprint-diff latest -from: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 -to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +from: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c MODIFIED SLEDS: @@ -2384,7 +2539,7 @@ planner config updated: > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: remove_mupdate_override is set in the blueprint (c8fba912-63ae-473a-9115-0495d10fb3bc) INFO performed noop zone image source checks on sled, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, num_total: 0, num_already_artifact: 0, num_eligible: 0, num_ineligible: 0 @@ -2393,17 +2548,21 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO altered physical disks, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, sled_edits: SledEditCounts { disks: EditCounts { added: 10, updated: 0, expunged: 0, removed: 0 }, datasets: EditCounts { added: 20, updated: 0, expunged: 0, removed: 0 }, zones: EditCounts { added: 0, updated: 0, expunged: 0, removed: 0 } } -generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +generated blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f based on parent blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c blueprint source: planner with report: -planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: +planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: planner config: add zones with mupdate override: true +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 7 zones are already from artifacts * skipping noop zone image source check on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b: all 0 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * zone adds and updates are blocked: - current target release generation (4) is lower than minimum required by blueprint (5) - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 + - sleds have deployment units with image sources not set to Artifact: + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 6 zones + * adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * discretionary zone placement waiting for NTP zones on sleds: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b * missing NTP zone on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b @@ -2414,8 +2573,8 @@ planner config: > blueprint-diff latest -from: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 -to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +from: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f MODIFIED SLEDS: @@ -2449,34 +2608,34 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- dataset name dataset id disposition quota reservation compression ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -+ 
oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone 1e98b76f-11ed-4b79-8759-cbecd2f7f0d9 in service none none off -+ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/zone e20c48ae-597e-4341-bf40-eeeb76c3762c in service none none off -+ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/zone 2bc7995d-9c70-4641-80a7-857c48fd1950 in service none none off -+ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/zone ada18d30-b0c4-46c8-9179-18b0538e9aaa in service none none off -+ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/zone 3a517a41-e948-4dd3-8792-cc44766db624 in service none none off -+ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/zone 31120549-ea59-4c77-ba4d-bb7c7533ed07 in service none none off -+ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/zone 300a9158-6179-4a27-b602-b5b508ffe753 in service none none off -+ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/zone 3e9ff584-4500-4ec7-ba7e-4c072ab1a930 in service none none off -+ oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/zone 3692a85a-ed9a-432a-a83b-ae0be332b5f4 in service none none off -+ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/zone a30fa102-f04d-40d9-89bb-1b99531e3352 in service none none off -+ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone/oxz_ntp_6af1f5f5-6a16-40f0-a830-651e738967aa 1bcd66fb-3ab2-43e9-8f1f-7109e699b00f in service none none off -+ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/debug fd79ae88-b57c-4119-8d75-e2a08d1f0754 in service 100 GiB none gzip-9 -+ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/debug 7fe7a330-9e24-4257-b342-52c91a95b727 in service 100 GiB none gzip-9 -+ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/debug 8e31b075-0e5c-4908-be7e-2d8be8292b72 in service 100 GiB none gzip-9 -+ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/debug 5b97259a-9114-4c66-987c-bcb0bf1b63ac in service 100 GiB none gzip-9 -+ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/debug c96c0f0a-1e9c-44c0-a774-b86c0e699474 in service 100 GiB none gzip-9 -+ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/debug abf8aedc-712e-43f0-a142-ec36543f987a in service 100 GiB none gzip-9 -+ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/debug 3efd9ba9-0319-44d0-a13a-9abc9aa3ce7b in service 100 GiB none gzip-9 -+ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/debug 12dc8471-bafb-46ae-bb19-4cc791dff15b in service 100 GiB none gzip-9 -+ oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/debug cdfb4f8b-a90f-4ba2-a620-f228a405f231 in service 100 GiB none gzip-9 -+ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/debug aa61d941-f193-4a8a-8918-e682b96968d9 in service 100 GiB none gzip-9 ++ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone 902e330d-4d6e-4b2c-8e75-83abd725278e in service none none off ++ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/zone b1852e8a-77a5-4069-950f-f414eb89b489 in service none none off ++ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/zone 76435db9-395e-4207-a1eb-80fd31462763 in service none none off ++ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/zone fd3b32f6-840a-4831-b27f-b65052d8c00b in service none none off ++ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/zone 5cc4f3dc-459c-42fe-b7b5-493b6d72d90c in service none none off ++ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/zone be939f45-3895-4ac2-8140-8345ba808ef0 in service none none off ++ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/zone 36c2157d-00f9-4934-b465-17c6dff5aa5b in service none none off ++ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/zone a777b70b-2d04-4cc1-8afc-85e46460ec1f in service none none off ++ 
oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/zone 3f037273-07b9-4852-b364-bd885d16aee5 in service none none off ++ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/zone 7647e033-19a3-400c-9bcc-d19d569715c5 in service none none off ++ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/zone/oxz_ntp_3c3c2ba0-1c7d-4005-b02c-98366435a2c1 58255ab2-30ff-448b-a049-a9ef6b0fd0c5 in service none none off ++ oxp_1a2d5932-ca5b-44b8-a0a4-31215d934293/crypt/debug 8d52d9dc-7e33-4a6f-9548-8a43048fb34e in service 100 GiB none gzip-9 ++ oxp_2454c824-3b2b-4350-a5c7-ce9b1ff5a903/crypt/debug e1a49e92-5d7a-46a4-a1ff-b53fcb7190af in service 100 GiB none gzip-9 ++ oxp_5369d002-dd19-48f2-8c08-ba063cb1e010/crypt/debug 97723fdc-18bb-43c5-b81a-6cda8b6b5a1f in service 100 GiB none gzip-9 ++ oxp_5e25e3a5-d115-4df0-a54b-81f29fbb9d61/crypt/debug b6dc1ce7-759e-4c13-a05b-116007ac3be0 in service 100 GiB none gzip-9 ++ oxp_6082ad32-e210-4786-9656-4b6bfcec5d05/crypt/debug cf9455fe-cfe0-4ae9-ac8a-31a7caf58c03 in service 100 GiB none gzip-9 ++ oxp_88659529-1c87-4107-8227-c9276a330bf5/crypt/debug 776bda33-4677-47c9-b2d7-ac4eeb0d81d3 in service 100 GiB none gzip-9 ++ oxp_9cde7847-ff58-41af-95bf-aecebc3f1344/crypt/debug 7bde4b36-0f64-4e03-9767-f0316ab60409 in service 100 GiB none gzip-9 ++ oxp_9e9ca75a-6a79-4706-bcce-35714d79913c/crypt/debug e716eab0-d1ba-43b4-8111-94d4565e1aa9 in service 100 GiB none gzip-9 ++ oxp_d8c90412-fbdb-480d-8a07-c3de73b441bc/crypt/debug 3b33c570-ecdb-4fb4-b856-3b108bf90920 in service 100 GiB none gzip-9 ++ oxp_f3052185-bebb-4e75-842b-d8125b70bb8a/crypt/debug e38d5c18-c24a-4b05-84e8-1726f2015746 in service 100 GiB none gzip-9 omicron zones: -------------------------------------------------------------------------------------------------------------------- zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------- -+ internal_ntp 6af1f5f5-6a16-40f0-a830-651e738967aa artifact: version 2.0.0 in service fd00:1122:3344:104::21 ++ internal_ntp 3c3c2ba0-1c7d-4005-b02c-98366435a2c1 artifact: version 2.0.0 in service fd00:1122:3344:104::21 COCKROACHDB SETTINGS: @@ -2496,15 +2655,15 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS: * DNS zone: "control-plane.oxide.internal": -+ name: 6af1f5f5-6a16-40f0-a830-651e738967aa.host (records: 1) ++ name: 3c3c2ba0-1c7d-4005-b02c-98366435a2c1.host (records: 1) + AAAA fd00:1122:3344:104::21 * name: _internal-ntp._tcp (records: 3 -> 4) - SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal ++ SRV port 123 3c3c2ba0-1c7d-4005-b02c-98366435a2c1.host.control-plane.oxide.internal + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal -+ SRV port 123 6af1f5f5-6a16-40f0-a830-651e738967aa.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal unchanged names: 39 (records: 52) @@ -2519,23 +2678,23 @@ external DNS: > # test that the planner bails if it attempts a rollback of the target release > # minimum generation. 
> blueprint-edit latest set-target-release-min-gen 1000 -blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f created from latest blueprint (27e755bc-dc10-4647-853c-f89bb3a15a2c): set target release minimum generation to 1000 +blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 created from latest blueprint (9f89efdf-a23e-4137-b7cc-79f4a91cbe1f): set target release minimum generation to 1000 > sled-set serial1 mupdate-override cc724abe-80c1-47e6-9771-19e6540531a9 set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: unset -> cc724abe-80c1-47e6-9771-19e6540531a9 > inventory-generate -generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds +generated inventory collection 08abe624-4b5f-491c-90cb-d74a84e4ba3e from configured sleds > blueprint-plan latest latest INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, new_bp_override: cc724abe-80c1-47e6-9771-19e6540531a9, prev_bp_override: None, zones: - - zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (Clickhouse) left unchanged, image source: install dataset - - zone 466a9f29-62bf-4e63-924a-b9efdb86afec (Nexus) left unchanged, image source: install dataset - - zone 62620961-fc4a-481e-968b-f5acbac0dc63 (InternalNtp) left unchanged, image source: install dataset - - zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (ExternalDns) left unchanged, image source: install dataset - - zone 99e2f30b-3174-40bf-a78a-90da8abba8ca (InternalDns) left unchanged, image source: install dataset - - zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (CruciblePantry) left unchanged, image source: install dataset - - zone bd354eef-d8a6-4165-9124-283fb5e46d77 (Crucible) left unchanged, image source: install dataset + - zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (Clickhouse) updated from artifact: version 2.0.0 to install dataset + - zone 466a9f29-62bf-4e63-924a-b9efdb86afec (Nexus) updated from artifact: version 2.0.0 to install dataset + - zone 62620961-fc4a-481e-968b-f5acbac0dc63 (InternalNtp) updated from artifact: version 2.0.0 to install dataset + - zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (ExternalDns) updated from artifact: version 2.0.0 to install dataset + - zone 99e2f30b-3174-40bf-a78a-90da8abba8ca (InternalDns) updated from artifact: version 2.0.0 to install dataset + - zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (CruciblePantry) updated from artifact: version 2.0.0 to install dataset + - zone bd354eef-d8a6-4165-9124-283fb5e46d77 (Crucible) updated from artifact: version 2.0.0 to install dataset , host_phase_2: - host phase 2 slot A: current contents (unchanged) - host phase 2 slot B: updated from artifact (version version 2.0.0, hash 9ff631b5b7229604ab7c5aae2ee4a34a64772736b332540d38077b3aea6952df) to preserving current contents diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index ab1d658da34..f5814c9fef2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -188,6 +188,13 @@ planning report for blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4: * zone adds waiting on blockers * zone adds and updates are blocked: - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + - sled 
aff6c093-197d-42c5-ad80-9f10ba051a34: 1 zone + - sled b82ede02-399c-48c6-a1de-411df4fa49a7: 2 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 2 zones + - sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e: 2 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -429,6 +436,12 @@ planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: * zone adds waiting on blockers * zone adds and updates are blocked: - sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 7 zones + - sled aff6c093-197d-42c5-ad80-9f10ba051a34: 1 zone + - sled b82ede02-399c-48c6-a1de-411df4fa49a7: 2 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 2 zones + * zone updates waiting on zone add blockers * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 30a7e7e967d..313caa94393 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -22,6 +22,897 @@ planner config: > # Create a TUF repository from a fake manifest. (The output TUF repo is > # written to a temporary directory that this invocation of `reconfigurator-cli` > # is running out of as its working directory.) +> # +> # This is used to simulate the initial version of the system. +> tuf-assemble ../../update-common/manifests/fake-0.0.1.toml +INFO assembling repository in +INFO artifacts assembled and archived to `repo-0.0.1.zip`, component: OmicronRepoAssembler +created repo-0.0.1.zip for system version 0.0.1 + + +> # Load the target release from the assembled TUF repository. 
+> set target-release repo-0.0.1.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 0.0.1, hash: 716e29860eade5de4cf28d2c81f1c3fcaf3a3c07af52961c0e231e3dd0ba4db8, length: 734 +INFO added artifact, name: fake-rot, kind: gimlet_rot_image_a, version: 0.0.1, hash: 244d553f832cf74043bbcc8a747c8d05384a0f89f7809dcab28c3f707b11f985, length: 787 +INFO added artifact, name: fake-rot, kind: gimlet_rot_image_b, version: 0.0.1, hash: 244d553f832cf74043bbcc8a747c8d05384a0f89f7809dcab28c3f707b11f985, length: 787 +INFO added artifact, name: fake-rot-bootloader, kind: gimlet_rot_bootloader, version: 0.0.1, hash: 5bfb2fef5a25100e7813636699bd365bbcd623980ae00e876ad705ef591feded, length: 794 +INFO added artifact, name: fake-host, kind: gimlet_host_phase_1, version: 0.0.1, hash: 143aa9751a0bb16ab3d2c8b56d2874eeab14e1ac3413aa0edf1dbf56900f3fcc, length: 524288 +INFO added artifact, name: fake-host, kind: cosmo_host_phase_1, version: 0.0.1, hash: 0c43c53453b1113d8ec83d0e3cb8139094b6f4594c304645a33248863141bac6, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 0.0.1, hash: 7cd830e1682d50620de0f5c24b8cca15937eb10d2a415ade6ad28c0d314408eb, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: gimlet_trampoline_phase_1, version: 0.0.1, hash: 040e9ffbd212b790da4fc3a6376c9ff102c852c4ac1f1a1bc84c7d8edc64029f, length: 524288 +INFO added artifact, name: fake-trampoline, kind: cosmo_trampoline_phase_1, version: 0.0.1, hash: bfc7d2bf0d2e5dde41b8ea85beca7f01262688297f438727fdeb8543c1ceb25e, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 0.0.1, hash: a05417d8d03400b9d556b63563c9958da983a0cdcc3259669966ad45e395c277, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 0.0.1, hash: 0cc283162daad1dd9d63cd20a484f4e0157b6895c179defa8a99fd220323a6c5, length: 1687 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 0.0.1, hash: f27ef7d2ce10696c4583ea194cdf61c3907f2143f666af964b8ed3bee1346be0, length: 1691 +INFO added artifact, name: clickhouse_server, kind: zone, version: 0.0.1, hash: bc35f79e04956e284c230f324fe7475ad5cb2ede08e6b4a77addcd9e6f50d33b, length: 1691 +INFO added artifact, name: cockroachdb, kind: zone, version: 0.0.1, hash: a1dc64b896b4bb5d0d295f63b5edeb82b3f945e1f830b06c32f96f9de30b93d1, length: 1690 +INFO added artifact, name: crucible-zone, kind: zone, version: 0.0.1, hash: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4, length: 1691 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 0.0.1, hash: 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d, length: 1696 +INFO added artifact, name: external-dns, kind: zone, version: 0.0.1, hash: 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7, length: 1690 +INFO added artifact, name: internal-dns, kind: zone, version: 0.0.1, hash: c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173, length: 1690 +INFO added artifact, name: ntp, kind: zone, version: 0.0.1, hash: b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531, length: 1682 +INFO added artifact, name: nexus, kind: zone, version: 0.0.1, hash: 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc, length: 1683 +INFO added artifact, name: oximeter, kind: zone, version: 0.0.1, hash: 7ea25be50cd4e98e2ba20916cb98fe8ea457372f5973eb6ac691b5bc90dbddc0, length: 1683 +INFO added 
artifact, name: fake-psc-sp, kind: psc_sp, version: 0.0.1, hash: 3a63db2465b433f7b2f2816f833dcce90e0aa7e7472b1735c63faf93a48bb2ab, length: 726 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 0.0.1, hash: 9bdc198ad072c74cfc1e145355eef307028067776b19f9e2a7830934176fe406, length: 770 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 0.0.1, hash: 9bdc198ad072c74cfc1e145355eef307028067776b19f9e2a7830934176fe406, length: 770 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 0.0.1, hash: a58c577f5c33e0a8176f078183a0c94b84ab1e1e7118c441f6b82551fba58f46, length: 794 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 0.0.1, hash: 9a559c6734981ec74fee73a56826f8a91beec39a59dea497f67d55c91ab74328, length: 736 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 0.0.1, hash: 7776db817d1f1b1a2f578050742e33bd4e805a4c76f36bce84dcb509b900249c, length: 776 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 0.0.1, hash: 7776db817d1f1b1a2f578050742e33bd4e805a4c76f36bce84dcb509b900249c, length: 776 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 0.0.1, hash: 0686443d50db2247077dc70b6543cea9a90a9792de00e06c06cff4c91fa5a4a8, length: 792 +INFO added artifact, name: installinator_document, kind: installinator_document, version: 0.0.1, hash: 657aaebc9c2f451446af0411a67a4bd057f39fa1b8a7fdc429ca4a2facd9344c, length: 367 +set target release based on repo-0.0.1.zip + + +> # Print the default target release. +> show +configured external DNS zone name: oxide.example +configured silo names: example-silo +internal DNS generations: 1 +external DNS generations: 1 +target number of Nexus instances: default +target release (generation 2): 0.0.1 (system-update-v0.0.1.zip) + artifact: 716e29860eade5de4cf28d2c81f1c3fcaf3a3c07af52961c0e231e3dd0ba4db8 gimlet_sp (fake-gimlet-sp version 0.0.1) + artifact: 244d553f832cf74043bbcc8a747c8d05384a0f89f7809dcab28c3f707b11f985 gimlet_rot_image_a (fake-rot version 0.0.1) + artifact: 244d553f832cf74043bbcc8a747c8d05384a0f89f7809dcab28c3f707b11f985 gimlet_rot_image_b (fake-rot version 0.0.1) + artifact: 5bfb2fef5a25100e7813636699bd365bbcd623980ae00e876ad705ef591feded gimlet_rot_bootloader (fake-rot-bootloader version 0.0.1) + artifact: 143aa9751a0bb16ab3d2c8b56d2874eeab14e1ac3413aa0edf1dbf56900f3fcc gimlet_host_phase_1 (fake-host version 0.0.1) + artifact: 0c43c53453b1113d8ec83d0e3cb8139094b6f4594c304645a33248863141bac6 cosmo_host_phase_1 (fake-host version 0.0.1) + artifact: 7cd830e1682d50620de0f5c24b8cca15937eb10d2a415ade6ad28c0d314408eb host_phase_2 (fake-host version 0.0.1) + artifact: 040e9ffbd212b790da4fc3a6376c9ff102c852c4ac1f1a1bc84c7d8edc64029f gimlet_trampoline_phase_1 (fake-trampoline version 0.0.1) + artifact: bfc7d2bf0d2e5dde41b8ea85beca7f01262688297f438727fdeb8543c1ceb25e cosmo_trampoline_phase_1 (fake-trampoline version 0.0.1) + artifact: a05417d8d03400b9d556b63563c9958da983a0cdcc3259669966ad45e395c277 trampoline_phase_2 (fake-trampoline version 0.0.1) + artifact: 0cc283162daad1dd9d63cd20a484f4e0157b6895c179defa8a99fd220323a6c5 zone (clickhouse version 0.0.1) + artifact: f27ef7d2ce10696c4583ea194cdf61c3907f2143f666af964b8ed3bee1346be0 zone (clickhouse_keeper version 0.0.1) + artifact: bc35f79e04956e284c230f324fe7475ad5cb2ede08e6b4a77addcd9e6f50d33b zone (clickhouse_server version 0.0.1) + artifact: a1dc64b896b4bb5d0d295f63b5edeb82b3f945e1f830b06c32f96f9de30b93d1 zone 
(cockroachdb version 0.0.1) + artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 zone (crucible-zone version 0.0.1) + artifact: 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d zone (crucible-pantry-zone version 0.0.1) + artifact: 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7 zone (external-dns version 0.0.1) + artifact: c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173 zone (internal-dns version 0.0.1) + artifact: b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531 zone (ntp version 0.0.1) + artifact: 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc zone (nexus version 0.0.1) + artifact: 7ea25be50cd4e98e2ba20916cb98fe8ea457372f5973eb6ac691b5bc90dbddc0 zone (oximeter version 0.0.1) + artifact: 3a63db2465b433f7b2f2816f833dcce90e0aa7e7472b1735c63faf93a48bb2ab psc_sp (fake-psc-sp version 0.0.1) + artifact: 9bdc198ad072c74cfc1e145355eef307028067776b19f9e2a7830934176fe406 psc_rot_image_a (fake-psc-rot version 0.0.1) + artifact: 9bdc198ad072c74cfc1e145355eef307028067776b19f9e2a7830934176fe406 psc_rot_image_b (fake-psc-rot version 0.0.1) + artifact: a58c577f5c33e0a8176f078183a0c94b84ab1e1e7118c441f6b82551fba58f46 psc_rot_bootloader (fake-psc-rot-bootloader version 0.0.1) + artifact: 9a559c6734981ec74fee73a56826f8a91beec39a59dea497f67d55c91ab74328 switch_sp (fake-switch-sp version 0.0.1) + artifact: 7776db817d1f1b1a2f578050742e33bd4e805a4c76f36bce84dcb509b900249c switch_rot_image_a (fake-switch-rot version 0.0.1) + artifact: 7776db817d1f1b1a2f578050742e33bd4e805a4c76f36bce84dcb509b900249c switch_rot_image_b (fake-switch-rot version 0.0.1) + artifact: 0686443d50db2247077dc70b6543cea9a90a9792de00e06c06cff4c91fa5a4a8 switch_rot_bootloader (fake-switch-rot-bootloader version 0.0.1) + artifact: 657aaebc9c2f451446af0411a67a4bd057f39fa1b8a7fdc429ca4a2facd9344c installinator_document (installinator_document version 0.0.1) +planner config: + add zones with mupdate override: false + + + +> # Update the install dataset on all sleds to the target release. +> # This will cause zones to be noop converted over to Artifact, +> # unblocking upgrades. +> sled-update-install-dataset serial0 --to-target-release +sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: install dataset updated: to target release (system version 0.0.1) + +> sled-update-install-dataset serial1 --to-target-release +sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: install dataset updated: to target release (system version 0.0.1) + +> sled-update-install-dataset serial2 --to-target-release +sled d81c6a84-79b8-4958-ae41-ea46c9b19763: install dataset updated: to target release (system version 0.0.1) + + +> # Generate inventory, then do a planning run to ensure that all zones +> # are set to Artifact. 
+> inventory-generate +generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 9, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 8, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 8, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO configuring MGS-driven update, artifact_version: 0.0.1, artifact_hash: 244d553f832cf74043bbcc8a747c8d05384a0f89f7809dcab28c3f707b11f985, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 +INFO reached maximum number of pending MGS-driven updates, max: 1 +generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 +blueprint source: planner with report: +planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: +* noop converting 9/9 install-dataset zones to artifact store on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* noop converting 8/8 install-dataset zones to artifact store on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +* noop converting 8/8 install-dataset zones to artifact store on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 +* 1 pending MGS update: + * model0:serial0: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) +* zone updates waiting on pending MGS updates 
(RoT / SP / Host OS / etc.) + + + +> blueprint-diff latest +from: blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 +to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2 -> 3): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca 
c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 - install dataset in service fd00:1122:3344:102::23 + └─ + artifact: version 0.0.1 +* crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 - install dataset in service fd00:1122:3344:102::28 + └─ + artifact: version 0.0.1 +* crucible bd354eef-d8a6-4165-9124-283fb5e46d77 - install dataset in service fd00:1122:3344:102::26 + └─ + artifact: version 0.0.1 +* crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c - install dataset in service fd00:1122:3344:102::27 + └─ + artifact: version 0.0.1 +* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 - install dataset in service fd00:1122:3344:102::25 + └─ + artifact: version 0.0.1 +* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a - install dataset in service fd00:1122:3344:102::24 + └─ + artifact: version 0.0.1 +* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca - install dataset in service fd00:1122:3344:1::1 + └─ + artifact: version 0.0.1 +* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 - install dataset in service fd00:1122:3344:102::21 + └─ + artifact: version 0.0.1 +* nexus 466a9f29-62bf-4e63-924a-b9efdb86afec - install dataset in service fd00:1122:3344:102::22 + └─ + artifact: version 0.0.1 + + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 
7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* crucible 058fd5f9-60a8-4e11-9302-15172782e17d - install dataset in service fd00:1122:3344:101::27 + └─ + artifact: version 0.0.1 +* crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 - install dataset in service fd00:1122:3344:101::25 + └─ + artifact: version 0.0.1 +* crucible dfac80b4-a887-430a-ae87-a4e065dba787 - install dataset in service fd00:1122:3344:101::26 + └─ + artifact: version 0.0.1 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 - install dataset in service fd00:1122:3344:101::24 + └─ + artifact: version 0.0.1 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d - install dataset in service 
fd00:1122:3344:101::23 + └─ + artifact: version 0.0.1 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c - install dataset in service fd00:1122:3344:2::1 + └─ + artifact: version 0.0.1 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c - install dataset in service fd00:1122:3344:101::21 + └─ + artifact: version 0.0.1 +* nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 - install dataset in service fd00:1122:3344:101::22 + └─ + artifact: version 0.0.1 + + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 
b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 - install dataset in service fd00:1122:3344:103::26 + └─ + artifact: version 0.0.1 +* crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 - install dataset in service fd00:1122:3344:103::27 + └─ + artifact: version 0.0.1 +* crucible f55647d4-5500-4ad3-893a-df45bd50d622 - install dataset in service fd00:1122:3344:103::25 + └─ + artifact: version 0.0.1 +* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 - install dataset in service fd00:1122:3344:103::24 + └─ + artifact: version 0.0.1 +* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 - install dataset in service fd00:1122:3344:103::23 + └─ + artifact: version 0.0.1 +* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 - install dataset in service fd00:1122:3344:3::1 + └─ + artifact: version 0.0.1 +* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 - install dataset in service fd00:1122:3344:103::21 + └─ + artifact: version 0.0.1 +* nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 - install dataset in service fd00:1122:3344:103::22 + └─ + artifact: version 0.0.1 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ++ sled 0 model0 serial0 244d553f832cf74043bbcc8a747c8d05384a0f89f7809dcab28c3f707b11f985 0.0.1 Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + +> # The above blueprint includes a pending MGS update, which we should delete +> # (we want to start from a fresh state). +> blueprint-edit latest delete-sp-update serial0 +blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 created from latest blueprint (8da82a8e-bf97-4fbd-8ddd-9f6462732cf1): deleted configured update for serial serial0 + +> # Also set the Omicron config for all sleds to reflect the +> # corresponding image sources. +> sled-set serial0 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (58d5e830-0884-47d8-a7cd-b2b3751adeb4) + +> sled-set serial1 omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (58d5e830-0884-47d8-a7cd-b2b3751adeb4) + +> sled-set serial2 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (58d5e830-0884-47d8-a7cd-b2b3751adeb4) + +> # Generate inventory once more to reflect the omicron config changes. 
+> inventory-generate +generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds + +> inventory-show latest +collection: 61f451b3-2121-4ed6-91c7-a550054f6c21 +collector: example +started: +done: +errors: 0 + +SLED AGENTS + +sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (role = Gimlet, serial serial1) + found at: from fake sled agent + address: [fd00:1122:3344:102::1]:12345 + usable hw threads: 10 + CPU family: amd_milan + usable memory (GiB): 0 + reservoir (GiB): 0 + physical disks: + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-727522a7-934f-494d-b5b3-160968e74463" } in 0 + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-72c59873-31ff-4e36-8d76-ff834009349a" } in 1 + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b" } in 2 + zpools + 727522a7-934f-494d-b5b3-160968e74463: total size: 100 GiB + 72c59873-31ff-4e36-8d76-ff834009349a: total size: 100 GiB + b5fd5bc1-099e-4e77-8028-a9793c11f43b: total size: 100 GiB + datasets: + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 - id: 09b9cc9b-3426-470b-a7bc-538f82dede03, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible - id: 1640adb6-70bf-44cf-b05c-bff6dd300cf3, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns - id: 2ad1875a-92ac-472f-8c26-593309f0e4da, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug - id: 2db6b7c1-0f46-4ced-a3ad-48872793360e, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible - id: 2f204c50-a327-479c-8852-f53ec7a19c1f, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug - id: 318fae85-abcb-4259-b1b6-ac96d193f7b7, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec - id: 3560dd69-3b23-4c69-807d-d673104cfc68, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone - id: 3b66453b-7148-4c1b-84a9-499e43290ab4, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 - id: 3e0d6188-c503-49cf-a441-fa7df40ceb43, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone - id: 4829f422-aa31-41a8-ab73-95684ff1ef48, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 - id: 49f8fbb6-5bac-4609-907f-6e3dfc206059, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 - id: 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + 
oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone - id: 775f9207-c42d-4af2-9186-27ffef67735e, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible - id: 78f34ce7-42f1-41da-995f-318f32054ad2, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse - id: 841d5648-05f0-47b0-b446-92f6b60fe9a6, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a - id: 8c4fa711-1d5d-4e93-85f0-d17bff47b063, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns - id: 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug - id: 93957ca0-9ed1-4e7b-8c34-2ce07a69541c, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 - id: b46de15d-33e7-4cd0-aa7c-e7be2a61e71b, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c - id: b8f2a09f-8bd2-4418-872b-a4457a3f958c, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca - id: c31623de-c19b-4615-9f1d-5e1daa5d3bda, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + +LEDGERED SLED CONFIG + generation: 3 + remove_mupdate_override: None + desired host phase 2 slot a: keep current contents + desired host phase 2 slot b: keep current contents + DISKS: 3 + ID ZPOOL_ID VENDOR MODEL SERIAL + 141ce987-3dd1-4650-8af2-6b3ace85de1e b5fd5bc1-099e-4e77-8028-a9793c11f43b fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b + 15778f9b-fe94-4aed-86c1-e88d7f4c3b47 727522a7-934f-494d-b5b3-160968e74463 fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 + f26df0e3-46bb-4c85-8146-efcab46179da 72c59873-31ff-4e36-8d76-ff834009349a fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a + DATASETS: 21 + ID NAME COMPRESSION QUOTA RESERVATION + 09b9cc9b-3426-470b-a7bc-538f82dede03 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 off none none + 1640adb6-70bf-44cf-b05c-bff6dd300cf3 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible off none none + 2ad1875a-92ac-472f-8c26-593309f0e4da oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns off none none + 2db6b7c1-0f46-4ced-a3ad-48872793360e oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug gzip-9 100 GiB none + 2f204c50-a327-479c-8852-f53ec7a19c1f oxp_727522a7-934f-494d-b5b3-160968e74463/crucible off none none + 318fae85-abcb-4259-b1b6-ac96d193f7b7 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug gzip-9 100 GiB none + 3560dd69-3b23-4c69-807d-d673104cfc68 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec off none none + 3b66453b-7148-4c1b-84a9-499e43290ab4 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone off none none + 3e0d6188-c503-49cf-a441-fa7df40ceb43 
oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 off none none + 4829f422-aa31-41a8-ab73-95684ff1ef48 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone off none none + 49f8fbb6-5bac-4609-907f-6e3dfc206059 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 off none none + 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 off none none + 775f9207-c42d-4af2-9186-27ffef67735e oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone off none none + 78f34ce7-42f1-41da-995f-318f32054ad2 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible off none none + 841d5648-05f0-47b0-b446-92f6b60fe9a6 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse off none none + 8c4fa711-1d5d-4e93-85f0-d17bff47b063 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a off none none + 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns off none none + 93957ca0-9ed1-4e7b-8c34-2ce07a69541c oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug gzip-9 100 GiB none + b46de15d-33e7-4cd0-aa7c-e7be2a61e71b oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 off none none + b8f2a09f-8bd2-4418-872b-a4457a3f958c oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c off none none + c31623de-c19b-4615-9f1d-5e1daa5d3bda oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca off none none + ZONES: 9 + ID KIND IMAGE_SOURCE + 353b3b65-20f7-48c3-88f7-495bd5d31545 clickhouse artifact: 0cc283162daad1dd9d63cd20a484f4e0157b6895c179defa8a99fd220323a6c5 + 466a9f29-62bf-4e63-924a-b9efdb86afec nexus artifact: 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc + 62620961-fc4a-481e-968b-f5acbac0dc63 internal_ntp artifact: b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531 + 6c3ae381-04f7-41ea-b0ac-74db387dbc3a external_dns artifact: 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7 + 86a22a56-0168-453d-9df1-cb2a7c64b5d3 crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + 99e2f30b-3174-40bf-a78a-90da8abba8ca internal_dns artifact: c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173 + ad6a3a03-8d0f-4504-99a4-cbf73d69b973 crucible_pantry artifact: 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d + bd354eef-d8a6-4165-9124-283fb5e46d77 crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + e2fdefe7-95b2-4fd2-ae37-56929a06d58c crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + zone image resolver status: + zone manifest: + path on boot disk: /fake/path/install/zones.json + boot disk inventory: + manifest generated by sled-agent + artifacts in install dataset: + - clickhouse.tar.gz (expected 1687 bytes with hash 0cc283162daad1dd9d63cd20a484f4e0157b6895c179defa8a99fd220323a6c5): ok + - clickhouse_keeper.tar.gz (expected 1691 bytes with hash f27ef7d2ce10696c4583ea194cdf61c3907f2143f666af964b8ed3bee1346be0): ok + - clickhouse_server.tar.gz (expected 1691 bytes with hash bc35f79e04956e284c230f324fe7475ad5cb2ede08e6b4a77addcd9e6f50d33b): ok + - cockroachdb.tar.gz (expected 1690 bytes with hash 
a1dc64b896b4bb5d0d295f63b5edeb82b3f945e1f830b06c32f96f9de30b93d1): ok + - crucible.tar.gz (expected 1691 bytes with hash f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4): ok + - crucible_pantry.tar.gz (expected 1696 bytes with hash 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d): ok + - external_dns.tar.gz (expected 1690 bytes with hash 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7): ok + - internal_dns.tar.gz (expected 1690 bytes with hash c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173): ok + - nexus.tar.gz (expected 1683 bytes with hash 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc): ok + - ntp.tar.gz (expected 1682 bytes with hash b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531): ok + - oximeter.tar.gz (expected 1683 bytes with hash 7ea25be50cd4e98e2ba20916cb98fe8ea457372f5973eb6ac691b5bc90dbddc0): ok + no non-boot disks + mupdate override: + path on boot disk: /fake/path/install/mupdate_override.json + no override on boot disk + no non-boot disks + boot disk slot: A + slot A details: + artifact: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a (1000 bytes) + image name: fake from debug_assume_success() + phase 2 hash: 0000000000000000000000000000000000000000000000000000000000000000 + slot B details: + artifact: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b (1000 bytes) + image name: fake from debug_assume_success() + phase 2 hash: 0101010101010101010101010101010101010101010101010101010101010101 + last reconciled config: matches ledgered config + no mupdate override to clear + no orphaned datasets + all disks reconciled successfully + all datasets reconciled successfully + all zones reconciled successfully + reconciler task status: idle (finished at after running for s) + +sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (role = Gimlet, serial serial0) + found at: from fake sled agent + address: [fd00:1122:3344:101::1]:12345 + usable hw threads: 10 + CPU family: amd_milan + usable memory (GiB): 0 + reservoir (GiB): 0 + physical disks: + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-073979dd-3248-44a5-9fa1-cc72a140d682" } in 0 + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b" } in 1 + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608" } in 2 + zpools + 073979dd-3248-44a5-9fa1-cc72a140d682: total size: 100 GiB + c6d33b64-fb96-4129-bab1-7878a06a5f9b: total size: 100 GiB + e4d937e1-6ddc-4eca-bb08-c1f73791e608: total size: 100 GiB + datasets: + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d - id: 02c56a30-7d97-406d-bd34-1eb437fd517d, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c - id: 1bca7f71-5e42-4749-91ec-fa40793a3a9a, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug - id: 248c6c10-1ac6-45de-bb55-ede36ca56bbd, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c - id: 3ac089c9-9dec-465b-863a-188e80d71fb4, compression: off + available: 1 GiB, used: 0 B + 
reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 - id: 42430c80-7836-4191-a4f6-bcee749010fe, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d - id: 43931274-7fe8-4077-825d-dff2bc8efa58, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone - id: 4617d206-4330-4dfa-b9f3-f63a3db834f9, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns - id: 4847a96e-a267-4ae7-aa3d-805c1e77f81e, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 - id: 4d7e3e8e-06bd-414c-a468-779e056a9b75, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone - id: 4f60b534-eaa3-40a1-b60f-bfdf147af478, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible - id: 50b029e3-96aa-41e5-bf39-023193a4355e, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug - id: 686c19cf-a0d7-45f6-866f-c564612b2664, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone - id: 793ac181-1b01-403c-850d-7f5c54bda6c9, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible - id: 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 - id: 832fd140-d467-4bad-b5e9-63171634087c, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 - id: a4c3032e-21fa-4d4a-b040-a7e3c572cf3c, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns - id: ad41be71-6c15-4428-b510-20ceacde4fa6, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug - id: cdf3684f-a6cf-4449-b9ec-e696b2c663e2, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible - id: ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + +LEDGERED SLED CONFIG + generation: 3 + remove_mupdate_override: None + desired host phase 2 slot a: keep current contents + desired host phase 2 slot b: keep current contents + DISKS: 3 + ID ZPOOL_ID VENDOR MODEL SERIAL + 099f869a-0f0d-430b-9c49-a7fb7fcaf3b2 e4d937e1-6ddc-4eca-bb08-c1f73791e608 fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 + 3f399219-7701-414c-9f07-8e97f688765c c6d33b64-fb96-4129-bab1-7878a06a5f9b fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b + 
f06acce3-ce96-4761-a070-e5e2fb87d8aa 073979dd-3248-44a5-9fa1-cc72a140d682 fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 + DATASETS: 19 + ID NAME COMPRESSION QUOTA RESERVATION + 02c56a30-7d97-406d-bd34-1eb437fd517d oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d off none none + 1bca7f71-5e42-4749-91ec-fa40793a3a9a oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c off none none + 248c6c10-1ac6-45de-bb55-ede36ca56bbd oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug gzip-9 100 GiB none + 3ac089c9-9dec-465b-863a-188e80d71fb4 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c off none none + 42430c80-7836-4191-a4f6-bcee749010fe oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 off none none + 43931274-7fe8-4077-825d-dff2bc8efa58 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d off none none + 4617d206-4330-4dfa-b9f3-f63a3db834f9 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone off none none + 4847a96e-a267-4ae7-aa3d-805c1e77f81e oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns off none none + 4d7e3e8e-06bd-414c-a468-779e056a9b75 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 off none none + 4f60b534-eaa3-40a1-b60f-bfdf147af478 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone off none none + 50b029e3-96aa-41e5-bf39-023193a4355e oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible off none none + 686c19cf-a0d7-45f6-866f-c564612b2664 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug gzip-9 100 GiB none + 793ac181-1b01-403c-850d-7f5c54bda6c9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone off none none + 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible off none none + 832fd140-d467-4bad-b5e9-63171634087c oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 off none none + a4c3032e-21fa-4d4a-b040-a7e3c572cf3c oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 off none none + ad41be71-6c15-4428-b510-20ceacde4fa6 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns off none none + cdf3684f-a6cf-4449-b9ec-e696b2c663e2 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug gzip-9 100 GiB none + ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible off none none + ZONES: 8 + ID KIND IMAGE_SOURCE + 058fd5f9-60a8-4e11-9302-15172782e17d crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + 0c71b3b2-6ceb-4e8f-b020-b08675e83038 nexus artifact: 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc + 427ec88f-f467-42fa-9bbb-66a91a36103c internal_dns artifact: c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173 + 5199c033-4cf9-4ab6-8ae7-566bd7606363 crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + 6444f8a5-6465-4f0b-a549-1993c113569c internal_ntp artifact: b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531 + 803bfb63-c246-41db-b0da-d3b87ddfc63d external_dns artifact: 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7 + ba4994a8-23f9-4b1a-a84f-a08d74591389 crucible_pantry artifact: 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d + 
dfac80b4-a887-430a-ae87-a4e065dba787 crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + zone image resolver status: + zone manifest: + path on boot disk: /fake/path/install/zones.json + boot disk inventory: + manifest generated by sled-agent + artifacts in install dataset: + - clickhouse.tar.gz (expected 1687 bytes with hash 0cc283162daad1dd9d63cd20a484f4e0157b6895c179defa8a99fd220323a6c5): ok + - clickhouse_keeper.tar.gz (expected 1691 bytes with hash f27ef7d2ce10696c4583ea194cdf61c3907f2143f666af964b8ed3bee1346be0): ok + - clickhouse_server.tar.gz (expected 1691 bytes with hash bc35f79e04956e284c230f324fe7475ad5cb2ede08e6b4a77addcd9e6f50d33b): ok + - cockroachdb.tar.gz (expected 1690 bytes with hash a1dc64b896b4bb5d0d295f63b5edeb82b3f945e1f830b06c32f96f9de30b93d1): ok + - crucible.tar.gz (expected 1691 bytes with hash f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4): ok + - crucible_pantry.tar.gz (expected 1696 bytes with hash 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d): ok + - external_dns.tar.gz (expected 1690 bytes with hash 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7): ok + - internal_dns.tar.gz (expected 1690 bytes with hash c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173): ok + - nexus.tar.gz (expected 1683 bytes with hash 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc): ok + - ntp.tar.gz (expected 1682 bytes with hash b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531): ok + - oximeter.tar.gz (expected 1683 bytes with hash 7ea25be50cd4e98e2ba20916cb98fe8ea457372f5973eb6ac691b5bc90dbddc0): ok + no non-boot disks + mupdate override: + path on boot disk: /fake/path/install/mupdate_override.json + no override on boot disk + no non-boot disks + boot disk slot: A + slot A details: + artifact: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a (1000 bytes) + image name: fake from debug_assume_success() + phase 2 hash: 0000000000000000000000000000000000000000000000000000000000000000 + slot B details: + artifact: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b (1000 bytes) + image name: fake from debug_assume_success() + phase 2 hash: 0101010101010101010101010101010101010101010101010101010101010101 + last reconciled config: matches ledgered config + no mupdate override to clear + no orphaned datasets + all disks reconciled successfully + all datasets reconciled successfully + all zones reconciled successfully + reconciler task status: idle (finished at after running for s) + +sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (role = Gimlet, serial serial2) + found at: from fake sled agent + address: [fd00:1122:3344:103::1]:12345 + usable hw threads: 10 + CPU family: amd_milan + usable memory (GiB): 0 + reservoir (GiB): 0 + physical disks: + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-18b20749-0748-4105-bb10-7b13cfc776e2" } in 0 + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2" } in 1 + U2: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-4930954e-9ac7-4453-b63f-5ab97c389a99" } in 2 + zpools + 18b20749-0748-4105-bb10-7b13cfc776e2: total size: 100 GiB + 30c16fe4-4229-49d0-ab01-3138f2c7dff2: total size: 100 GiB + 4930954e-9ac7-4453-b63f-5ab97c389a99: total size: 100 GiB + datasets: + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 - id: 
090bd88d-0a43-4040-a832-b13ae721f74f, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns - id: 1cb0a47a-59ac-4892-8e92-cf87b4290f96, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug - id: 21fd4f3a-ec31-469b-87b1-087c343a2422, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone - id: 252ac39f-b9e2-4697-8c07-3a833115d704, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 - id: 3443a368-199e-4d26-b59f-3f2bbd507761, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug - id: 41071985-1dfd-4ce5-8bc2-897161a8bce4, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 - id: 429da94b-19f7-48bd-98e9-47842863ba7b, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone - id: 45cd9687-20be-4247-b62a-dfdacf324929, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 - id: 4da74a5b-6911-4cca-b624-b90c65530117, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 - id: 50ea8c15-c4c0-4403-a490-d14b3405dfc2, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 - id: 54bbadaf-ec04-41a2-a62f-f5ac5bf321be, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible - id: 6f04dd20-5e2c-4fa8-8430-a886470ed140, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug - id: 7a6a2058-ea78-49de-9730-cce5e28b4cfb, compression: gzip-9 + available: 1 GiB, used: 0 B + reservation: None, quota: Some(ByteCount(107374182400)) + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible - id: 7ea73f80-c4e0-450a-92dc-8397ce2af14f, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns - id: 96ae8389-3027-4260-9374-e0f6ce851de2, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible - id: a50cd13a-5749-4e79-bb8b-19229500a8b3, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 - id: b1deff4b-51df-4a37-9043-afbd7c70a1cb, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 - id: c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209, compression: off + available: 1 GiB, 
used: 0 B + reservation: None, quota: None + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone - id: e009d8b8-4695-4322-b53f-f03f2744aef7, compression: off + available: 1 GiB, used: 0 B + reservation: None, quota: None + +LEDGERED SLED CONFIG + generation: 3 + remove_mupdate_override: None + desired host phase 2 slot a: keep current contents + desired host phase 2 slot b: keep current contents + DISKS: 3 + ID ZPOOL_ID VENDOR MODEL SERIAL + 08724339-3d5b-432c-b7f3-55efdcf9f098 4930954e-9ac7-4453-b63f-5ab97c389a99 fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 + 3bea44bb-c1c1-462f-a105-b2aae3b17373 18b20749-0748-4105-bb10-7b13cfc776e2 fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 + d1b24eb3-c174-4d23-9f94-58a8fee73135 30c16fe4-4229-49d0-ab01-3138f2c7dff2 fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 + DATASETS: 19 + ID NAME COMPRESSION QUOTA RESERVATION + 090bd88d-0a43-4040-a832-b13ae721f74f oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 off none none + 1cb0a47a-59ac-4892-8e92-cf87b4290f96 oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns off none none + 21fd4f3a-ec31-469b-87b1-087c343a2422 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug gzip-9 100 GiB none + 252ac39f-b9e2-4697-8c07-3a833115d704 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone off none none + 3443a368-199e-4d26-b59f-3f2bbd507761 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 off none none + 41071985-1dfd-4ce5-8bc2-897161a8bce4 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug gzip-9 100 GiB none + 429da94b-19f7-48bd-98e9-47842863ba7b oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 off none none + 45cd9687-20be-4247-b62a-dfdacf324929 oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone off none none + 4da74a5b-6911-4cca-b624-b90c65530117 oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 off none none + 50ea8c15-c4c0-4403-a490-d14b3405dfc2 oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 off none none + 54bbadaf-ec04-41a2-a62f-f5ac5bf321be oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 off none none + 6f04dd20-5e2c-4fa8-8430-a886470ed140 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible off none none + 7a6a2058-ea78-49de-9730-cce5e28b4cfb oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug gzip-9 100 GiB none + 7ea73f80-c4e0-450a-92dc-8397ce2af14f oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible off none none + 96ae8389-3027-4260-9374-e0f6ce851de2 oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns off none none + a50cd13a-5749-4e79-bb8b-19229500a8b3 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible off none none + b1deff4b-51df-4a37-9043-afbd7c70a1cb oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 off none none + c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 off none none + e009d8b8-4695-4322-b53f-f03f2744aef7 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone off none none + ZONES: 8 + ID KIND IMAGE_SOURCE + 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 nexus artifact: 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc + 694bd14f-cb24-4be4-bb19-876e79cda2c8 
crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + 75b220ba-a0f4-4872-8202-dc7c87f062d0 crucible_pantry artifact: 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d + 7c252b64-c5af-4ec1-989e-9a03f3b0f111 crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + ea5b4030-b52f-44b2-8d70-45f15f987d01 internal_dns artifact: c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173 + f10a4fb9-759f-4a65-b25e-5794ad2d07d8 internal_ntp artifact: b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531 + f55647d4-5500-4ad3-893a-df45bd50d622 crucible artifact: f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4 + f6ec9c67-946a-4da3-98d5-581f72ce8bf0 external_dns artifact: 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7 + zone image resolver status: + zone manifest: + path on boot disk: /fake/path/install/zones.json + boot disk inventory: + manifest generated by sled-agent + artifacts in install dataset: + - clickhouse.tar.gz (expected 1687 bytes with hash 0cc283162daad1dd9d63cd20a484f4e0157b6895c179defa8a99fd220323a6c5): ok + - clickhouse_keeper.tar.gz (expected 1691 bytes with hash f27ef7d2ce10696c4583ea194cdf61c3907f2143f666af964b8ed3bee1346be0): ok + - clickhouse_server.tar.gz (expected 1691 bytes with hash bc35f79e04956e284c230f324fe7475ad5cb2ede08e6b4a77addcd9e6f50d33b): ok + - cockroachdb.tar.gz (expected 1690 bytes with hash a1dc64b896b4bb5d0d295f63b5edeb82b3f945e1f830b06c32f96f9de30b93d1): ok + - crucible.tar.gz (expected 1691 bytes with hash f3694b20fa1de79fb1f7c3a9f89f9f9eb5ebaaefc3caba7e1991e7e2b3191ed4): ok + - crucible_pantry.tar.gz (expected 1696 bytes with hash 6055871bfa626d582162302bf027102d90a03a42866867df2582f8eba231fc6d): ok + - external_dns.tar.gz (expected 1690 bytes with hash 584217eae459e4c2bd00621cf1910d06edb8258948a4832ab0329cf42067c0c7): ok + - internal_dns.tar.gz (expected 1690 bytes with hash c29c262c79d8f3fa4e0bbec221a286ca6e02b64719b6d35f32cc5e92e36b9173): ok + - nexus.tar.gz (expected 1683 bytes with hash 5f0b97b090966bb754485c3d397d0918d54bf4ffdc6fa691b77f61686f2ac8cc): ok + - ntp.tar.gz (expected 1682 bytes with hash b661b5d1370f5ac593b4c15b5fcd22c904991cf33b6db32f886374bc022a3531): ok + - oximeter.tar.gz (expected 1683 bytes with hash 7ea25be50cd4e98e2ba20916cb98fe8ea457372f5973eb6ac691b5bc90dbddc0): ok + no non-boot disks + mupdate override: + path on boot disk: /fake/path/install/mupdate_override.json + no override on boot disk + no non-boot disks + boot disk slot: A + slot A details: + artifact: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a (1000 bytes) + image name: fake from debug_assume_success() + phase 2 hash: 0000000000000000000000000000000000000000000000000000000000000000 + slot B details: + artifact: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b (1000 bytes) + image name: fake from debug_assume_success() + phase 2 hash: 0101010101010101010101010101010101010101010101010101010101010101 + last reconciled config: matches ledgered config + no mupdate override to clear + no orphaned datasets + all disks reconciled successfully + all datasets reconciled successfully + all zones reconciled successfully + reconciler task status: idle (finished at after running for s) + +KEEPER MEMBERSHIP + no membership retrieved + + +COCKROACH STATUS + no cockroach status retrieved + + +NTP STATUS + No NTP zones reported timesync information + +INTERNAL DNS STATUS + Zone 427ec88f-f467-42fa-9bbb-66a91a36103c: Internal DNS 
generation @ 1 + Zone 99e2f30b-3174-40bf-a78a-90da8abba8ca: Internal DNS generation @ 1 + Zone ea5b4030-b52f-44b2-8d70-45f15f987d01: Internal DNS generation @ 1 + + + +> # Setup is now done -- create another TUF repository which will act as the +> # target release being updated to. > tuf-assemble ../../update-common/manifests/fake.toml INFO assembling repository in INFO artifacts assembled and archived to `repo-1.0.0.zip`, component: OmicronRepoAssembler @@ -72,7 +963,7 @@ configured silo names: example-silo internal DNS generations: 1 external DNS generations: 1 target number of Nexus instances: default -target release (generation 2): 1.0.0 (system-update-v1.0.0.zip) +target release (generation 3): 1.0.0 (system-update-v1.0.0.zip) artifact: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4 gimlet_sp (fake-gimlet-sp version 1.0.0) artifact: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 gimlet_rot_image_a (fake-rot version 1.0.0) artifact: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 gimlet_rot_image_b (fake-rot version 1.0.0) @@ -112,10 +1003,8 @@ planner config: > save saved.out saved planning input, collections, and blueprints to "saved.out" -> wipe all -- wiped system, reconfigurator-sim config, and RNG state - - - reset seed to reconfigurator-cli-test +> wipe system +wiped system > # This should NOT show the target release. > show @@ -129,14 +1018,14 @@ planner config: add zones with mupdate override: false -> load saved.out +> load saved.out 61f451b3-2121-4ed6-91c7-a550054f6c21 loaded data from "saved.out" result: system: - using collection f45ba181-4b56-42cc-a762-874d90184a43 as source of sled inventory data + using collection 61f451b3-2121-4ed6-91c7-a550054f6c21 as source of sled inventory data loaded sleds: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 - loaded collections: f45ba181-4b56-42cc-a762-874d90184a43 - loaded blueprints: 184f10b3-61cb-41ef-9b93-3489b2bac559, dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 + loaded collections: f45ba181-4b56-42cc-a762-874d90184a43, eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51, 61f451b3-2121-4ed6-91c7-a550054f6c21 + loaded blueprints: 184f10b3-61cb-41ef-9b93-3489b2bac559, dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21, 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1, 58d5e830-0884-47d8-a7cd-b2b3751adeb4 loaded service IP pool ranges: [V4(Ipv4Range { first: 192.0.2.2, last: 192.0.2.20 })] loaded internal DNS generations: (none) loaded external DNS generations: (none) @@ -152,7 +1041,7 @@ configured silo names: example-silo internal DNS generations: 1 external DNS generations: 1 target number of Nexus instances: default -target release (generation 2): 1.0.0 (system-update-v1.0.0.zip) +target release (generation 3): 1.0.0 (system-update-v1.0.0.zip) artifact: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4 gimlet_sp (fake-gimlet-sp version 1.0.0) artifact: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 gimlet_rot_image_a (fake-rot version 1.0.0) artifact: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02 gimlet_rot_image_b (fake-rot version 1.0.0) @@ -188,7 +1077,6 @@ planner config: - > # Great. Now, let's run through an upgrade! > # First, print out what we've got. 
> sled-list @@ -201,28 +1089,35 @@ d81c6a84-79b8-4958-ae41-ea46c9b19763 serial2 3 fd00:1122:3344:103::/64 T ENA ID PARENT TIME_CREATED 184f10b3-61cb-41ef-9b93-3489b2bac559 * yes dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 184f10b3-61cb-41ef-9b93-3489b2bac559 + 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 + 58d5e830-0884-47d8-a7cd-b2b3751adeb4 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 > inventory-list ID NERRORS TIME_DONE f45ba181-4b56-42cc-a762-874d90184a43 0 +eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 0 +61f451b3-2121-4ed6-91c7-a550054f6c21 0 > # First step: upgrade one RoT bootloader. > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 
+generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint source: planner with report: -planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: +planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -231,8 +1126,8 @@ planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: > blueprint-diff latest -from: blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 -to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 +from: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 +to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -270,20 +1165,23 @@ external DNS: > # If we generate another plan, there should be no change. > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 
d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 based on parent blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 +generated blueprint df06bb57-ad42-4431-9206-abff322896c7 based on parent blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 blueprint source: planner with report: -planning report for blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4: +planning report for blueprint df06bb57-ad42-4431-9206-abff322896c7: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
@@ -292,8 +1190,8 @@ planning report for blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4: > blueprint-diff latest -from: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 -to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 +from: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 +to: blueprint df06bb57-ad42-4431-9206-abff322896c7 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -329,24 +1227,27 @@ external DNS: set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT bootloader versions: stage0 -> 1.0.0 > inventory-generate -generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds +generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO configuring MGS-driven 
update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 +generated blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba based on parent blueprint df06bb57-ad42-4431-9206-abff322896c7 blueprint source: planner with report: -planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: +planning report for blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -355,8 +1256,8 @@ planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: > blueprint-diff latest -from: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 -to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 +from: blueprint df06bb57-ad42-4431-9206-abff322896c7 +to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -401,24 +1302,27 @@ external DNS: set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT settings: slot b -> 1.0.0, active slot -> B, persistent boot preference -> B > inventory-generate -generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds +generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop 
zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint df06bb57-ad42-4431-9206-abff322896c7 based on parent blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 +generated blueprint 9034c710-3e57-45f3-99e5-4316145e87ac based on parent blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba blueprint source: planner with report: -planning report for blueprint df06bb57-ad42-4431-9206-abff322896c7: +planning report for blueprint 9034c710-3e57-45f3-99e5-4316145e87ac: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
@@ -427,8 +1331,8 @@ planning report for blueprint df06bb57-ad42-4431-9206-abff322896c7: > blueprint-diff latest -from: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 -to: blueprint df06bb57-ad42-4431-9206-abff322896c7 +from: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba +to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -474,24 +1378,27 @@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 SP versions: active -> 1.0.0 > # This should report that the update completed, remove that update, and add a > # host OS update for this same sled. > inventory-generate -generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds +generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: 
Sled, serial_number: serial0, part_number: model0 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba based on parent blueprint df06bb57-ad42-4431-9206-abff322896c7 +generated blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 based on parent blueprint 9034c710-3e57-45f3-99e5-4316145e87ac blueprint source: planner with report: -planning report for blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba: +planning report for blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
@@ -500,12 +1407,12 @@ planning report for blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba: > blueprint-diff latest -from: blueprint df06bb57-ad42-4431-9206-abff322896c7 -to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba +from: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac +to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 3 -> 4): host phase 2 contents: -------------------------------- @@ -551,17 +1458,17 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - crucible 058fd5f9-60a8-4e11-9302-15172782e17d install dataset in service fd00:1122:3344:101::27 - crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 0.0.1 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 0.0.1 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 COCKROACHDB SETTINGS: @@ -601,19 +1508,22 @@ external DNS: > # If we generate another plan, there should be no change. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 9034c710-3e57-45f3-99e5-4316145e87ac based on parent blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba +generated blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 based on parent blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 blueprint source: planner with report: -planning report for blueprint 9034c710-3e57-45f3-99e5-4316145e87ac: +planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones 
are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -622,8 +1532,8 @@ planning report for blueprint 9034c710-3e57-45f3-99e5-4316145e87ac: > blueprint-diff latest -from: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba -to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac +from: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 +to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -657,25 +1567,28 @@ external DNS: set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 2 details: B -> d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47 > inventory-generate -generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds +generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds > # Planning after only phase 2 has changed should make no changes. We're still > # waiting on phase 1 to change. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 based on parent blueprint 9034c710-3e57-45f3-99e5-4316145e87ac +generated blueprint 626487fa-7139-45ec-8416-902271fc730b based on parent blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 blueprint source: planner with report: -planning report for blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976: +planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones 
are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -684,8 +1597,8 @@ planning report for blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976: > blueprint-diff latest -from: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac -to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 +from: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 +to: blueprint 626487fa-7139-45ec-8416-902271fc730b COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -718,25 +1631,28 @@ external DNS: set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 1 details: active -> B, B -> b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b > inventory-generate -generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds +generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds > # Planning _still_ shouldn't make any new changes; the OS update as a whole > # isn't done until sled-agent reports it has booted from the new image. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO keeping apparently-impossible MGS-driven update (waiting for recent update to be applied), artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 based on parent blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 +generated blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b based on parent blueprint 626487fa-7139-45ec-8416-902271fc730b blueprint source: planner with report: -planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: +planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: +* skipping noop zone image source check on sled 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -745,8 +1661,8 @@ planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: > blueprint-diff latest -from: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 -to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 +from: blueprint 626487fa-7139-45ec-8416-902271fc730b +to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -779,27 +1695,30 @@ external DNS: set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 2 details: boot_disk -> B > inventory-generate -generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configured sleds +generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds > # Planning should now remove the host OS update and plan the next RoT bootloader > # update. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 626487fa-7139-45ec-8416-902271fc730b based on parent blueprint 
a5a8f242-ffa5-473c-8efd-2acf2dc0b736 +generated blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 based on parent blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b blueprint source: planner with report: -planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: +planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -808,8 +1727,8 @@ planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: > blueprint-diff latest -from: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 -to: blueprint 626487fa-7139-45ec-8416-902271fc730b +from: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b +to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -857,23 +1776,26 @@ ignoring impossible MGS updates since set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0_next -> 0.5.0 > inventory-generate -generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds +generated inventory collection 08abe624-4b5f-491c-90cb-d74a84e4ba3e from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found 
in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: Version(ArtifactVersion("0.5.0")), expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b based on parent blueprint 626487fa-7139-45ec-8416-902271fc730b +generated blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 based on parent blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 blueprint source: planner with report: -planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: +planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
@@ -882,8 +1804,8 @@ planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: > blueprint-diff latest -from: blueprint 626487fa-7139-45ec-8416-902271fc730b -to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b +from: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 +to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -926,23 +1848,26 @@ external DNS: set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0 -> 1.0.0 > inventory-generate -generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds +generated inventory collection 005f6a30-7f65-4593-9f78-ee68f766f42b from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: Version(ArtifactVersion("0.5.0")), expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, 
expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 based on parent blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b +generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 blueprint source: planner with report: -planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: +planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -951,8 +1876,8 @@ planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: > blueprint-diff latest -from: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b -to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 +from: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 +to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -999,23 +1924,26 @@ ignoring impossible MGS updates since set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot b -> 0.5.0 > inventory-generate -generated inventory collection 08abe624-4b5f-491c-90cb-d74a84e4ba3e from configured sleds +generated inventory collection b5263998-e486-4cea-8842-b32bd326fa3a from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop 
checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 based on parent blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 +generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 blueprint source: planner with report: -planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: +planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
@@ -1024,8 +1952,8 @@ planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: > blueprint-diff latest -from: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 -to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 +from: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1068,23 +1996,26 @@ external DNS: set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot b -> 1.0.0, active slot -> B, persistent boot preference -> B > inventory-generate -generated inventory collection 005f6a30-7f65-4593-9f78-ee68f766f42b from configured sleds +generated inventory collection 68767302-7fed-4eb1-9611-3dfd807ff0cd from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO 
configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 +generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 blueprint source: planner with report: -planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1093,8 +2024,8 @@ planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: > blueprint-diff latest -from: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 -to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +from: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1140,23 +2071,26 @@ ignoring impossible MGS updates since set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: inactive -> 0.5.0 > inventory-generate -generated inventory collection b5263998-e486-4cea-8842-b32bd326fa3a from configured sleds +generated inventory collection 62898097-2ff1-48d0-8bc1-91b475daa33d from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 +generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 blueprint source: planner with report: -planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: Version(ArtifactVersion("0.5.0")) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1165,8 +2099,8 @@ planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: > blueprint-diff latest -from: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 -to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +from: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1208,25 +2142,28 @@ external DNS: set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: active -> 1.0.0 > inventory-generate -generated inventory collection 68767302-7fed-4eb1-9611-3dfd807ff0cd from configured sleds +generated inventory collection 3086f142-62d3-4f77-bda3-674afbb42d0d from configured sleds > # Planning should remove this update and add an OS update for this sled. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 +generated blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f 
based on parent blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c blueprint source: planner with report: -planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1235,12 +2172,12 @@ planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: > blueprint-diff latest -from: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 -to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +from: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2 -> 3): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 3 -> 4): host phase 2 contents: -------------------------------- @@ -1288,18 +2225,18 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 0.0.1 in 
service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 0.0.1 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 COCKROACHDB SETTINGS: @@ -1348,22 +2285,25 @@ ignoring impossible MGS updates since set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 1 details: B -> ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff > inventory-generate -generated inventory collection 62898097-2ff1-48d0-8bc1-91b475daa33d from configured sleds +generated inventory collection ae5b3bb4-ce21-465f-b18e-857614732d66 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:102::1]:12345, 
expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 +generated blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 based on parent blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f blueprint source: planner with report: -planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: +planning report for blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
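The updated expectations above exercise the case where every zone on a sled already runs from a TUF artifact, so the noop image-source check reports num_already_artifact equal to num_total and the planning report adds a "skipping noop zone image source check" line per sled. As a minimal sketch of that counting (the ZoneImageSource enum and the function names here are illustrative stand-ins, not the actual Omicron planner API):

    // Hypothetical stand-ins for the planner's bookkeeping; names are illustrative only.
    #[derive(Clone, Copy, PartialEq, Eq)]
    enum ZoneImageSource {
        InstallDataset,
        Artifact, // image already served from a TUF repo artifact
    }

    #[derive(Default, Debug)]
    struct NoopCheckCounts {
        num_total: usize,
        num_already_artifact: usize,
        num_eligible: usize,   // could be switched to an artifact now
        num_ineligible: usize, // e.g. matching artifact not present in the TUF repo
    }

    fn noop_check_counts(zones: &[(ZoneImageSource, bool /* artifact available */)]) -> NoopCheckCounts {
        let mut c = NoopCheckCounts { num_total: zones.len(), ..Default::default() };
        for (source, artifact_available) in zones {
            match source {
                ZoneImageSource::Artifact => c.num_already_artifact += 1,
                ZoneImageSource::InstallDataset if *artifact_available => c.num_eligible += 1,
                ZoneImageSource::InstallDataset => c.num_ineligible += 1,
            }
        }
        c
    }

    fn skip_report_line(sled_id: &str, c: &NoopCheckCounts) -> Option<String> {
        // Mirrors the "skipping noop zone image source check" report lines above.
        (c.num_total > 0 && c.num_already_artifact == c.num_total).then(|| {
            format!(
                "* skipping noop zone image source check on sled {sled_id}: all {} zones are already from artifacts",
                c.num_total
            )
        })
    }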
@@ -1372,8 +2312,8 @@ planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: > blueprint-diff latest -from: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 -to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +from: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f +to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1418,17 +2358,17 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 2 details: boot_disk -> set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 1 details: active -> B, B -> b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b > inventory-generate -generated inventory collection 3086f142-62d3-4f77-bda3-674afbb42d0d from configured sleds +generated inventory collection 34c3258c-b2ab-4da9-9720-41a3a703c3d7 from configured sleds > # Another planning step should try to update the last sled, starting with the > # RoT bootloader. > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 
0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -1436,9 +2376,12 @@ INFO skipping board for MGS-driven update, serial_number: serial1, part_number: INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f based on parent blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c +generated blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 based on parent blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 blueprint source: planner with report: -planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: +planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model2:serial2: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
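Each planning pass above configures exactly one update and then logs "reached maximum number of pending MGS-driven updates, max: 1", which is why the fleet is walked one board at a time. A rough sketch of that gating, assuming a hypothetical pending-update collection rather than the real planner type:

    // Illustrative only: cap the number of concurrently pending MGS-driven updates.
    struct PendingMgsUpdates<U> {
        max: usize,
        pending: Vec<U>,
    }

    impl<U> PendingMgsUpdates<U> {
        fn new(max: usize) -> Self {
            Self { max, pending: Vec::new() }
        }

        /// Returns false (and logs) once the cap is reached, mirroring the
        /// "reached maximum number of pending MGS-driven updates" lines above.
        fn try_push(&mut self, update: U) -> bool {
            if self.pending.len() >= self.max {
                eprintln!("reached maximum number of pending MGS-driven updates, max: {}", self.max);
                return false;
            }
            self.pending.push(update);
            true
        }
    }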
@@ -1447,8 +2390,8 @@ planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: > blueprint-diff latest -from: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c -to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f +from: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 +to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1491,22 +2434,25 @@ external DNS: set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT bootloader versions: stage0 -> 1.0.0 > inventory-generate -generated inventory collection ae5b3bb4-ce21-465f-b18e-857614732d66 from configured sleds +generated inventory collection 5e106b73-6a14-4955-b8a8-a4f8afed6405 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, 
part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 based on parent blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f +generated blueprint b82656b0-a9be-433d-83d0-e2bdf371777a based on parent blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 blueprint source: planner with report: -planning report for blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35: +planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1515,8 +2461,8 @@ planning report for blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35: > blueprint-diff latest -from: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f -to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 +from: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1563,22 +2509,25 @@ ignoring impossible MGS updates since set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.0.0, active slot -> A, persistent boot preference -> B > inventory-generate -generated inventory collection 34c3258c-b2ab-4da9-9720-41a3a703c3d7 from configured sleds +generated inventory collection 36ef425f-a672-4bf4-8d29-14815a84ccad from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 
d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 based on parent blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 +generated blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 based on parent blueprint b82656b0-a9be-433d-83d0-e2bdf371777a blueprint source: planner with report: -planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: +planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
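When the next inventory no longer matches a pending update's expectations, the planner logs "MGS-driven update impossible (will remove it and re-evaluate board)" and re-issues the update with preconditions read from the current inventory; that is why the expected RoT inactive version and boot preferences shift between consecutive blueprints above. A hedged sketch of that refresh step, using invented types:

    // Invented types; the real planner preconditions are richer than this.
    #[derive(Clone, PartialEq, Eq, Debug)]
    struct RotPreconditions {
        expected_active_slot: char,                 // 'A' or 'B'
        expected_active_version: String,
        expected_inactive_version: Option<String>,  // None plays the role of NoValidVersion
        expected_persistent_boot_preference: char,
    }

    struct RotInventory {
        active_slot: char,
        active_version: String,
        inactive_version: Option<String>,
        persistent_boot_preference: char,
    }

    /// If the pending update's preconditions no longer match inventory, rebuild them
    /// from what the board reports now ("impossible ... re-evaluate board").
    fn refresh_preconditions(pending: &RotPreconditions, inv: &RotInventory) -> Option<RotPreconditions> {
        let current = RotPreconditions {
            expected_active_slot: inv.active_slot,
            expected_active_version: inv.active_version.clone(),
            expected_inactive_version: inv.inactive_version.clone(),
            expected_persistent_boot_preference: inv.persistent_boot_preference,
        };
        if &current != pending { Some(current) } else { None }
    }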
@@ -1587,8 +2536,8 @@ planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: > blueprint-diff latest -from: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 -to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +from: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1637,22 +2586,25 @@ ignoring impossible MGS updates since set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.1.0, active slot -> B, persistent boot preference -> B, pending persistent boot preference -> Some(B) > inventory-generate -generated inventory collection 5e106b73-6a14-4955-b8a8-a4f8afed6405 from configured sleds +generated inventory collection 70bea701-e212-4877-8e6c-925f1f73ddd2 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("1.0.0")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, 
expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: Some(B), expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint b82656b0-a9be-433d-83d0-e2bdf371777a based on parent blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 +generated blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 based on parent blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 blueprint source: planner with report: -planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: +planning report for blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: Some(B), expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1661,8 +2613,8 @@ planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: > blueprint-diff latest -from: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 -to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +from: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 +to: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1708,22 +2660,25 @@ ignoring impossible MGS updates since set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.1.0, active slot -> B, persistent boot preference -> B, pending persistent boot preference -> None, transient boot preference -> Some(B) > inventory-generate -generated inventory collection 36ef425f-a672-4bf4-8d29-14815a84ccad from configured sleds +generated inventory collection 8187f847-81c7-4750-88ac-d691937461af from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory 
hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: Some(B), expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: Some(B), expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 based on parent blueprint b82656b0-a9be-433d-83d0-e2bdf371777a +generated blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 based on parent blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 blueprint source: planner with report: -planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: +planning report for blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), expected_persistent_boot_preference: B, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: Some(B) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
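The successive RoT steps above each react to one field changing in inventory (inactive-slot version, persistent boot preference, pending persistent preference, transient preference) and re-issue the update with refreshed expectations until the board settles on the new slot. As a simplified illustration of the state being tracked, not the actual RoT update protocol:

    // Simplified model of the RoT fields the planner keys its expectations on.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum RotSlot { A, B }

    #[derive(Debug)]
    struct RotBootState {
        active_slot: RotSlot,
        persistent_boot_preference: RotSlot,
        pending_persistent_boot_preference: Option<RotSlot>,
        transient_boot_preference: Option<RotSlot>,
    }

    impl RotBootState {
        /// The update is settled once the active slot and persistent preference both
        /// point at the target and no pending/transient overrides remain -- the state
        /// the final RoT blueprint-diff above converges to.
        fn settled_on(&self, target: RotSlot) -> bool {
            self.active_slot == target
                && self.persistent_boot_preference == target
                && self.pending_persistent_boot_preference.is_none()
                && self.transient_boot_preference.is_none()
        }
    }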
@@ -1732,8 +2687,8 @@ planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: > blueprint-diff latest -from: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a -to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 +from: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 +to: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1777,22 +2732,25 @@ external DNS: set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot b -> 1.0.0, active slot -> B, persistent boot preference -> B, transient boot preference -> None > inventory-generate -generated inventory collection 70bea701-e212-4877-8e6c-925f1f73ddd2 from configured sleds +generated inventory collection 45c1c7bb-984a-43f7-bb3f-4a5437ed7b82 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: Some(B), expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: B, expected_active_slot: ExpectedActiveRotSlot { slot: B, version: ArtifactVersion("1.1.0") }, expected_inactive_version: Version(ArtifactVersion("0.0.2")), component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, 
expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 based on parent blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 +generated blueprint e54a0836-53e1-4948-a3af-0b77165289b5 based on parent blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 blueprint source: planner with report: -planning report for blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2: +planning report for blueprint e54a0836-53e1-4948-a3af-0b77165289b5: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model2:serial2: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) @@ -1801,8 +2759,8 @@ planning report for blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2: > blueprint-diff latest -from: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 -to: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 +from: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 +to: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -1845,22 +2803,25 @@ external DNS: set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 SP versions: active -> 1.0.0 > inventory-generate -generated inventory collection 8187f847-81c7-4750-88ac-d691937461af from configured sleds +generated inventory collection ca7f27e8-5949-4ac1-8f32-18ad76d9c217 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring 
for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:103::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO reached maximum number of pending MGS-driven updates, max: 1 -generated blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 based on parent blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 +generated blueprint 459a45a5-616e-421f-873b-2fb08c36205c based on parent blueprint e54a0836-53e1-4948-a3af-0b77165289b5 blueprint source: planner with report: -planning report for blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6: +planning report for blueprint 459a45a5-616e-421f-873b-2fb08c36205c: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 pending MGS update: * model2:serial2: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
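The host OS steps pair a phase 1 write (the pending HostPhase1 details driven through MGS) with a phase 2 image fetched by sled-agent, and every attempt restates the phase 1/phase 2 hashes, boot disk, and active slot it expects to find. A small sketch of the precondition check those fields imply, with invented names:

    // Invented types: the expectations a host_phase_1 update carries, per the report lines above.
    struct HostPhase1Expectations {
        expected_boot_disk: char,            // 'A' or 'B'
        expected_active_phase_1_slot: char,
        expected_active_phase_1_hash: [u8; 32],
        expected_inactive_phase_1_hash: [u8; 32],
        expected_active_phase_2_hash: [u8; 32],
        expected_inactive_phase_2_hash: [u8; 32],
    }

    struct HostBootInventory {
        boot_disk: char,
        active_phase_1_slot: char,
        phase_1_hashes: [[u8; 32]; 2], // indexed by slot: 0 = A, 1 = B
        phase_2_hashes: [[u8; 32]; 2],
    }

    fn slot_index(slot: char) -> usize { if slot == 'A' { 0 } else { 1 } }

    /// True if the sled still looks the way the pending update assumed; otherwise the
    /// planner treats the update as impossible and rebuilds it from current inventory.
    fn phase1_preconditions_hold(exp: &HostPhase1Expectations, inv: &HostBootInventory) -> bool {
        let active = slot_index(inv.active_phase_1_slot);
        let inactive = 1 - active;
        inv.boot_disk == exp.expected_boot_disk
            && inv.active_phase_1_slot == exp.expected_active_phase_1_slot
            && inv.phase_1_hashes[active] == exp.expected_active_phase_1_hash
            && inv.phase_1_hashes[inactive] == exp.expected_inactive_phase_1_hash
            && inv.phase_2_hashes[active] == exp.expected_active_phase_2_hash
            && inv.phase_2_hashes[inactive] == exp.expected_inactive_phase_2_hash
    }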
@@ -1869,12 +2830,12 @@ planning report for blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6: > blueprint-diff latest -from: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 -to: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 +from: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 +to: blueprint 459a45a5-616e-421f-873b-2fb08c36205c MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 3 -> 4): host phase 2 contents: -------------------------------- @@ -1920,17 +2881,17 @@ to: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 install dataset in service fd00:1122:3344:103::26 - crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 0.0.1 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 0.0.1 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 COCKROACHDB SETTINGS: @@ -1976,25 +2937,28 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 2 details: boot_disk -> set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 host phase 1 details: active -> B, B -> b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b > inventory-generate -generated inventory collection 45c1c7bb-984a-43f7-bb3f-4a5437ed7b82 from configured sleds +generated inventory collection 8a02a1c6-9e86-4dc0-9293-cd17da34f319 from configured sleds > # Do another planning run. This should start updating zones (one at a time). 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, sled_agent_address: [fd00:1122:3344:103::1]:12345, expected_inactive_phase_2_hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO ran out of boards for MGS-driven update -generated blueprint e54a0836-53e1-4948-a3af-0b77165289b5 based on parent blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 +generated blueprint b2295597-5788-482e-acf9-1731ec63fbd2 based on parent blueprint 459a45a5-616e-421f-873b-2fb08c36205c blueprint source: planner with report: -planning report for blueprint e54a0836-53e1-4948-a3af-0b77165289b5: +planning report for blueprint b2295597-5788-482e-acf9-1731ec63fbd2: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check 
on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (clickhouse) * 25 remaining out-of-date zones @@ -2003,12 +2967,12 @@ planning report for blueprint e54a0836-53e1-4948-a3af-0b77165289b5: > blueprint-diff latest -from: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 -to: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 +from: blueprint 459a45a5-616e-421f-873b-2fb08c36205c +to: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 3 -> 4): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 4 -> 5): host phase 2 contents: ------------------------------ @@ -2058,15 +3022,15 @@ to: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 ------------------------------------------------------------------------------------------------------------------------- zone type zone id image source disposition underlay IP ------------------------------------------------------------------------------------------------------------------------- - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 - install dataset in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 0.0.1 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 - artifact: version 0.0.1 in service fd00:1122:3344:102::23 └─ + artifact: version 1.0.0 @@ -2107,25 +3071,28 @@ external DNS: > # Update the first control plane zone and plan again, which should update the > # next zone on this sled. 
> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (e54a0836-53e1-4948-a3af-0b77165289b5) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (b2295597-5788-482e-acf9-1731ec63fbd2) > inventory-generate -generated inventory collection ca7f27e8-5949-4ac1-8f32-18ad76d9c217 from configured sleds +generated inventory collection c1adcd42-121f-4580-bfb9-d8a9937ca9e1 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 459a45a5-616e-421f-873b-2fb08c36205c based on parent blueprint e54a0836-53e1-4948-a3af-0b77165289b5 +generated blueprint 6fad8fd4-e825-433f-b76d-495484e068ce based on parent blueprint b2295597-5788-482e-acf9-1731ec63fbd2 blueprint source: planner with report: -planning report for blueprint 459a45a5-616e-421f-873b-2fb08c36205c: +planning report for blueprint 6fad8fd4-e825-433f-b76d-495484e068ce: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 62620961-fc4a-481e-968b-f5acbac0dc63 (internal_ntp) * 24 remaining out-of-date zones 
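Two zone-update paths are visible in this stretch: clickhouse is rewritten in place to the version 1.0.0 artifact, while internal_ntp is expunged here and a replacement NTP zone appears in the following step. A rough sketch of that per-kind decision, limited to what this excerpt demonstrates rather than the planner's actual policy:

    // Illustration only, based on the behavior shown in this output; the real
    // planner derives the strategy differently.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum ZoneKind {
        Clickhouse,
        Crucible,
        CruciblePantry,
        ExternalDns,
        InternalDns,
        InternalNtp,
        Nexus,
    }

    #[derive(Debug)]
    enum UpdateStrategy {
        InPlace,           // rewrite the zone's image source to the new artifact
        ExpungeAndReplace, // mark the old zone expunged, then place a fresh one
    }

    fn update_strategy(kind: ZoneKind) -> Option<UpdateStrategy> {
        match kind {
            // Observed above: clickhouse flips straight to "artifact: version 1.0.0".
            ZoneKind::Clickhouse => Some(UpdateStrategy::InPlace),
            // Observed above: internal_ntp is expunged, then a new NTP zone is added.
            ZoneKind::InternalNtp => Some(UpdateStrategy::ExpungeAndReplace),
            // Not exercised in this excerpt; left undecided rather than guessed.
            _ => None,
        }
    }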
@@ -2134,12 +3101,12 @@ planning report for blueprint 459a45a5-616e-421f-873b-2fb08c36205c: > blueprint-diff latest -from: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 -to: blueprint 459a45a5-616e-421f-873b-2fb08c36205c +from: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 +to: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 4 -> 5): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 5 -> 6): host phase 2 contents: ------------------------------ @@ -2191,14 +3158,14 @@ to: blueprint 459a45a5-616e-421f-873b-2fb08c36205c zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset - in service fd00:1122:3344:102::21 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 0.0.1 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 in service fd00:1122:3344:1::1 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 - in service fd00:1122:3344:102::21 └─ + expunged ⏳ @@ -2243,26 +3210,29 @@ external DNS: > # have to tweak the number of times we iterate on this sled as our simulated > # system or planner placement changes. 
> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (459a45a5-616e-421f-873b-2fb08c36205c) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (6fad8fd4-e825-433f-b76d-495484e068ce) > inventory-generate -generated inventory collection 8a02a1c6-9e86-4dc0-9293-cd17da34f319 from configured sleds +generated inventory collection 94b231f9-80a3-48a9-8d25-70f9b42b64ca from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 1, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: f83ade6d-9ab1-4679-813b-b9457e039c0b (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] -generated blueprint b2295597-5788-482e-acf9-1731ec63fbd2 based on parent blueprint 459a45a5-616e-421f-873b-2fb08c36205c +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: ba87399e-e9b7-4ee4-8cb7-0032822630e9 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +generated blueprint 24b6e243-100c-428d-8ea6-35b504226f55 based on parent 
blueprint 6fad8fd4-e825-433f-b76d-495484e068ce blueprint source: planner with report: -planning report for blueprint b2295597-5788-482e-acf9-1731ec63fbd2: +planning report for blueprint 24b6e243-100c-428d-8ea6-35b504226f55: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * waiting for NTP zones to appear in inventory on sleds: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * missing NTP zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c @@ -2271,12 +3241,12 @@ planning report for blueprint b2295597-5788-482e-acf9-1731ec63fbd2: > blueprint-diff latest -from: blueprint 459a45a5-616e-421f-873b-2fb08c36205c -to: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 +from: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce +to: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 5 -> 6): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 6 -> 7): host phase 2 contents: ------------------------------ @@ -2320,7 +3290,7 @@ to: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 -+ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off omicron zones: @@ -2328,16 +3298,16 @@ to: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset - expunged ⏳ fd00:1122:3344:102::21 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 0.0.1 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 
artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 in service fd00:1122:3344:1::1 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:102::21 └─ + expunged ✓ -+ internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 ++ internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 COCKROACHDB SETTINGS: @@ -2361,9 +3331,9 @@ internal DNS: - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal ++ SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal -+ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal -+ name: f83ade6d-9ab1-4679-813b-b9457e039c0b.host (records: 1) ++ name: ba87399e-e9b7-4ee4-8cb7-0032822630e9.host (records: 1) + AAAA fd00:1122:3344:102::29 unchanged names: 49 (records: 61) @@ -2375,25 +3345,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (b2295597-5788-482e-acf9-1731ec63fbd2) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (24b6e243-100c-428d-8ea6-35b504226f55) > inventory-generate -generated inventory collection c1adcd42-121f-4580-bfb9-d8a9937ca9e1 from configured sleds +generated inventory collection 756aecb6-8353-46ad-a6c4-10ad0f2bbb7f from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 2, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 
d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 6fad8fd4-e825-433f-b76d-495484e068ce based on parent blueprint b2295597-5788-482e-acf9-1731ec63fbd2 +generated blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce based on parent blueprint 24b6e243-100c-428d-8ea6-35b504226f55 blueprint source: planner with report: -planning report for blueprint 6fad8fd4-e825-433f-b76d-495484e068ce: +planning report for blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (external_dns) * 23 remaining out-of-date zones @@ -2402,12 +3375,12 @@ planning report for blueprint 6fad8fd4-e825-433f-b76d-495484e068ce: > blueprint-diff latest -from: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 -to: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce +from: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 +to: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 6 -> 7): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 7 -> 8): host phase 2 contents: ------------------------------ @@ -2446,7 +3419,7 @@ to: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 
in service 100 GiB none gzip-9 @@ -2461,15 +3434,15 @@ to: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset - in service fd00:1122:3344:102::24 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 0.0.1 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 - in service fd00:1122:3344:102::24 └─ + expunged ⏳ @@ -2522,25 +3495,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (6fad8fd4-e825-433f-b76d-495484e068ce) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (79fff7a2-2495-4c75-8465-4dc01bab48ce) > inventory-generate -generated inventory collection 94b231f9-80a3-48a9-8d25-70f9b42b64ca from configured sleds +generated inventory collection 84152e52-8c2e-46ab-880e-4cc2a1fb9dcb from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 2, num_eligible: 0, num_ineligible: 6 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 24b6e243-100c-428d-8ea6-35b504226f55 based on parent blueprint 6fad8fd4-e825-433f-b76d-495484e068ce +generated blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 based on parent blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce blueprint source: planner with report: -planning report for blueprint 24b6e243-100c-428d-8ea6-35b504226f55: +planning report for blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * discretionary zones placed: * external_dns zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -2549,12 +3525,12 @@ planning report for blueprint 24b6e243-100c-428d-8ea6-35b504226f55: > blueprint-diff latest -from: blueprint 6fad8fd4-e825-433f-b76d-495484e068ce -to: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 +from: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce +to: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 7 -> 8): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 8 -> 9): host phase 2 contents: ------------------------------ @@ -2595,12 +3571,12 @@ to: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 -+ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off -+ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off omicron zones: @@ -2608,17 +3584,17 @@ to: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset - expunged ⏳ fd00:1122:3344:102::24 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 0.0.1 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:102::24 └─ + expunged ✓ -+ 
external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a ++ external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a COCKROACHDB SETTINGS: @@ -2638,14 +3614,14 @@ to: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 internal DNS: * DNS zone: "control-plane.oxide.internal": ++ name: 4ab0ec67-b27e-42b5-af22-9117ad11113b.host (records: 1) ++ AAAA fd00:1122:3344:102::2a * name: _external-dns._tcp (records: 2 -> 3) - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal ++ SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal -+ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal -+ name: ba87399e-e9b7-4ee4-8cb7-0032822630e9.host (records: 1) -+ AAAA fd00:1122:3344:102::2a unchanged names: 49 (records: 61) external DNS: @@ -2670,25 +3646,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (24b6e243-100c-428d-8ea6-35b504226f55) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312) > inventory-generate -generated inventory collection 756aecb6-8353-46ad-a6c4-10ad0f2bbb7f from configured sleds +generated inventory collection bcfc7436-77de-47e4-8158-daad15a54da2 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 3, num_eligible: 0, num_ineligible: 6 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, 
serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce based on parent blueprint 24b6e243-100c-428d-8ea6-35b504226f55 +generated blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d based on parent blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 blueprint source: planner with report: -planning report for blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce: +planning report for blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 86a22a56-0168-453d-9df1-cb2a7c64b5d3 (crucible) * 22 remaining out-of-date zones @@ -2697,12 +3676,12 @@ planning report for blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce: > blueprint-diff latest -from: blueprint 24b6e243-100c-428d-8ea6-35b504226f55 -to: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce +from: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 +to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 8 -> 9): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 9 -> 10): host phase 2 contents: ------------------------------ @@ -2729,8 +3708,8 @@ to: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off @@ -2740,12 +3719,12 @@ to: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 @@ -2756,16 +3735,16 @@ to: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 - install dataset in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: 
version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 - artifact: version 0.0.1 in service fd00:1122:3344:102::28 └─ + artifact: version 1.0.0 @@ -2796,25 +3775,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (79fff7a2-2495-4c75-8465-4dc01bab48ce) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d) > inventory-generate -generated inventory collection 84152e52-8c2e-46ab-880e-4cc2a1fb9dcb from configured sleds +generated inventory collection 6dbdc88a-4828-480e-b41d-8946f41a3134 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 4, num_eligible: 0, num_ineligible: 5 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 based on parent blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce +generated blueprint e2125c83-b255-45c9-bc9b-802cff09a812 based on parent blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d blueprint source: planner with report: -planning report for 
blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312: +planning report for blueprint e2125c83-b255-45c9-bc9b-802cff09a812: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 99e2f30b-3174-40bf-a78a-90da8abba8ca (internal_dns) * 21 remaining out-of-date zones @@ -2823,12 +3805,12 @@ planning report for blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312: > blueprint-diff latest -from: blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce -to: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 +from: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d +to: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 9 -> 10): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 10 -> 11): host phase 2 contents: ------------------------------ @@ -2855,8 +3837,8 @@ to: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off @@ -2865,11 +3847,11 @@ to: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 
3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 @@ -2885,15 +3867,15 @@ to: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset - in service fd00:1122:3344:1::1 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 - in service fd00:1122:3344:1::1 └─ + expunged ⏳ @@ -2946,25 +3928,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint 
(e2125c83-b255-45c9-bc9b-802cff09a812) > inventory-generate -generated inventory collection bcfc7436-77de-47e4-8158-daad15a54da2 from configured sleds +generated inventory collection eb500068-cd91-484b-a532-51081571ecbe from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 4, num_eligible: 0, num_ineligible: 4 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d based on parent blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 +generated blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 based on parent blueprint e2125c83-b255-45c9-bc9b-802cff09a812 blueprint source: planner with report: -planning report for blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d: +planning report for blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * discretionary zones placed: * internal_dns zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -2973,12 +3958,12 @@ planning report for blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d: > blueprint-diff latest -from: blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 -to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d +from: blueprint 
e2125c83-b255-45c9-bc9b-802cff09a812 +to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 10 -> 11): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 11 -> 12): host phase 2 contents: ------------------------------ @@ -3005,8 +3990,8 @@ to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off @@ -3016,17 +4001,17 @@ to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e 
in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 -+ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off -+ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off omicron zones: @@ -3035,17 +4020,17 @@ to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset - expunged ⏳ fd00:1122:3344:1::1 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 in service fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:1::1 └─ + expunged ✓ -+ internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 ++ internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 COCKROACHDB SETTINGS: @@ -3065,6 +4050,8 @@ to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d internal DNS: * DNS zone: "control-plane.oxide.internal": ++ name: 698d1d82-0620-4978-93ac-0ba5d40f3da9.host (records: 1) ++ AAAA fd00:1122:3344:1::1 * name: @ 
(records: 2 -> 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal @@ -3075,10 +4062,8 @@ internal DNS: - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal -+ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal -+ name: c821c39d-2b2c-4c55-8874-ac12315ba1e4.host (records: 1) -+ AAAA fd00:1122:3344:1::1 * name: ns1 (records: 1 -> 1) - AAAA fd00:1122:3344:2::1 + AAAA fd00:1122:3344:1::1 @@ -3097,25 +4082,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (f4a6848e-d13c-46e1-8c6a-944f886d7ba3) > inventory-generate -generated inventory collection 6dbdc88a-4828-480e-b41d-8946f41a3134 from configured sleds +generated inventory collection 4492baf6-5638-4c1f-bba2-608163519022 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 5, num_eligible: 0, num_ineligible: 4 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint e2125c83-b255-45c9-bc9b-802cff09a812 based on parent blueprint 
4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d +generated blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c based on parent blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 blueprint source: planner with report: -planning report for blueprint e2125c83-b255-45c9-bc9b-802cff09a812: +planning report for blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (crucible_pantry) * 20 remaining out-of-date zones @@ -3124,12 +4112,12 @@ planning report for blueprint e2125c83-b255-45c9-bc9b-802cff09a812: > blueprint-diff latest -from: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d -to: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 +from: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 +to: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 11 -> 12): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 12 -> 13): host phase 2 contents: ------------------------------ @@ -3156,10 +4144,10 @@ to: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off @@ -3167,13 +4155,13 @@ to: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 @@ -3187,16 +4175,16 @@ to: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 - internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset - in service fd00:1122:3344:102::25 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service 
fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 - in service fd00:1122:3344:102::25 └─ + expunged ⏳ @@ -3235,25 +4223,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (e2125c83-b255-45c9-bc9b-802cff09a812) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (834e4dbe-3b71-443d-bd4c-20e8253abc0c) > inventory-generate -generated inventory collection eb500068-cd91-484b-a532-51081571ecbe from configured sleds +generated inventory collection 73f58d4d-6be7-4007-811c-0e578279410e from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 5, num_eligible: 0, num_ineligible: 3 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven 
update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 based on parent blueprint e2125c83-b255-45c9-bc9b-802cff09a812 +generated blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 based on parent blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c blueprint source: planner with report: -planning report for blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3: +planning report for blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * discretionary zones placed: * crucible_pantry zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -3262,12 +4253,12 @@ planning report for blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3: > blueprint-diff latest -from: blueprint e2125c83-b255-45c9-bc9b-802cff09a812 -to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 +from: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c +to: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 12 -> 13): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 13 -> 14): host phase 2 contents: ------------------------------ @@ -3294,9 +4285,9 @@ to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off @@ -3306,17 +4297,17 @@ to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 
49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 -+ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off ++ oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_156bfcde-e3fa-4abe-a93e-eb4a408b4e5e 7f2ba73c-1f57-4f92-8059-738059808061 in service none none off omicron zones: @@ -3325,18 +4316,18 @@ to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 - internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ 
fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset - expunged ⏳ fd00:1122:3344:102::25 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 0.0.1 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:102::25 └─ + expunged ✓ -+ crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b ++ crucible_pantry 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e artifact: version 1.0.0 in service fd00:1122:3344:102::2b COCKROACHDB SETTINGS: @@ -3356,12 +4347,12 @@ to: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 internal DNS: * DNS zone: "control-plane.oxide.internal": -+ name: 698d1d82-0620-4978-93ac-0ba5d40f3da9.host (records: 1) ++ name: 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host (records: 1) + AAAA fd00:1122:3344:102::2b * name: _crucible-pantry._tcp (records: 2 -> 3) - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal -+ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal unchanged names: 49 (records: 61) @@ -3374,25 +4365,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (f4a6848e-d13c-46e1-8c6a-944f886d7ba3) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1) > inventory-generate -generated inventory collection 4492baf6-5638-4c1f-bba2-608163519022 from configured sleds +generated inventory collection 74448e29-ef07-4d7f-9d31-39079eba8296 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 6, num_eligible: 0, num_ineligible: 3 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO 
BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c based on parent blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 +generated blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 based on parent blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 blueprint source: planner with report: -planning report for blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c: +planning report for blueprint e2deb7c0-2262-49fe-855f-4250c22afb36: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone bd354eef-d8a6-4165-9124-283fb5e46d77 (crucible) * 19 remaining out-of-date zones @@ -3401,12 +4395,12 @@ planning report for blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c: > blueprint-diff latest -from: blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 -to: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c +from: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 +to: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 13 -> 14): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 14 -> 15): host phase 2 contents: ------------------------------ @@ -3433,9 +4427,9 @@ to: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off @@ -3444,15 +4438,15 @@ to: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_156bfcde-e3fa-4abe-a93e-eb4a408b4e5e 7f2ba73c-1f57-4f92-8059-738059808061 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 @@ -3464,17 +4458,17 @@ to: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c -------------------------------------------------------------------------------------------------------------------------- clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset expunged ✓ fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 - internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* crucible bd354eef-d8a6-4165-9124-283fb5e46d77 - install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 0.0.1 in service fd00:1122:3344:102::27 + crucible_pantry 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* crucible bd354eef-d8a6-4165-9124-283fb5e46d77 - artifact: version 0.0.1 in service fd00:1122:3344:102::26 └─ + artifact: version 1.0.0 @@ -3505,25 +4499,28 @@ external DNS: > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c 
omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (834e4dbe-3b71-443d-bd4c-20e8253abc0c) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (e2deb7c0-2262-49fe-855f-4250c22afb36) > inventory-generate -generated inventory collection 73f58d4d-6be7-4007-811c-0e578279410e from configured sleds +generated inventory collection a815c282-5564-4cea-b667-a7a5295fc2c1 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 7, num_eligible: 0, num_ineligible: 2 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 based on parent blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c +generated blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf based on parent blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 blueprint source: planner with report: -planning report for blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1: +planning report for blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone e2fdefe7-95b2-4fd2-ae37-56929a06d58c (crucible) * 18 remaining out-of-date zones @@ -3532,12 +4529,12 @@ planning report for 
blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1: > blueprint-diff latest -from: blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c -to: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 +from: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 +to: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 14 -> 15): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 15 -> 16): host phase 2 contents: ------------------------------ @@ -3564,9 +4561,9 @@ to: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off @@ -3575,15 +4572,15 @@ to: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_156bfcde-e3fa-4abe-a93e-eb4a408b4e5e 7f2ba73c-1f57-4f92-8059-738059808061 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 @@ -3596,16 +4593,16 @@ to: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 1.0.0 in service fd00:1122:3344:102::26 - crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset expunged ✓ fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 - internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c - install dataset in service fd00:1122:3344:102::27 + crucible_pantry 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: 
version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 +* crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c - artifact: version 0.0.1 in service fd00:1122:3344:102::27 └─ + artifact: version 1.0.0 @@ -3639,25 +4636,28 @@ external DNS: > # after everything else, so the next step should update the first zone on the > # next sled. > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (23ce505c-8991-44a5-8863-f2b906fba9cf) > inventory-generate -generated inventory collection 74448e29-ef07-4d7f-9d31-39079eba8296 from configured sleds +generated inventory collection 18ca4fd2-190d-4ac5-b0f3-14a384bcd254 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 based on parent blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 +generated blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d based on parent blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf blueprint source: planner with report: -planning report for blueprint 
e2deb7c0-2262-49fe-855f-4250c22afb36: +planning report for blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 058fd5f9-60a8-4e11-9302-15172782e17d (crucible) * 17 remaining out-of-date zones @@ -3666,12 +4666,12 @@ planning report for blueprint e2deb7c0-2262-49fe-855f-4250c22afb36: > blueprint-diff latest -from: blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 -to: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 +from: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf +to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 3 -> 4): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 4 -> 5): host phase 2 contents: ------------------------------ @@ -3719,14 +4719,14 @@ to: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 ------------------------------------------------------------------------------------------------------------------------- zone type zone id image source disposition underlay IP ------------------------------------------------------------------------------------------------------------------------- - crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* crucible 058fd5f9-60a8-4e11-9302-15172782e17d - install dataset in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 0.0.1 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* crucible 058fd5f9-60a8-4e11-9302-15172782e17d - artifact: version 0.0.1 in service fd00:1122:3344:101::27 └─ + artifact: version 1.0.0 @@ -3758,25 +4758,28 @@ external DNS: > # Step through updates of all the non-Nexus zones on this sled. 
> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (e2deb7c0-2262-49fe-855f-4250c22afb36) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (c0d81ea6-909c-4efb-964e-beff67f6da0d) > inventory-generate -generated inventory collection a815c282-5564-4cea-b667-a7a5295fc2c1 from configured sleds +generated inventory collection b460bcc7-664d-4dff-92fb-f250def5537c from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 1, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf based on parent blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 +generated blueprint 60b55d33-5fec-4277-9864-935197eaead7 based on parent blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d blueprint source: planner with report: -planning report for blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf: +planning report for blueprint 60b55d33-5fec-4277-9864-935197eaead7: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 427ec88f-f467-42fa-9bbb-66a91a36103c (internal_dns) * 16 remaining out-of-date zones 
@@ -3785,12 +4788,12 @@ planning report for blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf: > blueprint-diff latest -from: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 -to: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf +from: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d +to: blueprint 60b55d33-5fec-4277-9864-935197eaead7 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 4 -> 5): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 5 -> 6): host phase 2 contents: ------------------------------ @@ -3841,13 +4844,13 @@ to: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 - crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset - in service fd00:1122:3344:2::1 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 0.0.1 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 in service fd00:1122:3344:101::23 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 - in service fd00:1122:3344:2::1 └─ + expunged ⏳ @@ -3878,9 +4881,9 @@ internal DNS: + NS ns2.control-plane.oxide.internal * name: _nameservice._tcp (records: 3 -> 2) - SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal -- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal -+ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal * name: ns2 (records: 1 -> 1) - AAAA fd00:1122:3344:2::1 @@ -3897,25 +4900,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (23ce505c-8991-44a5-8863-f2b906fba9cf) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (60b55d33-5fec-4277-9864-935197eaead7) > inventory-generate 
-generated inventory collection 18ca4fd2-190d-4ac5-b0f3-14a384bcd254 from configured sleds +generated inventory collection f8212fb6-115e-4568-a05c-b241e2e8ffb9 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 1, num_eligible: 0, num_ineligible: 6 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d based on parent blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf +generated blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 based on parent blueprint 60b55d33-5fec-4277-9864-935197eaead7 blueprint source: planner with report: -planning report for blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d: +planning report for blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * discretionary zones placed: * internal_dns zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -3924,12 +4930,12 @@ planning report for blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d: > blueprint-diff latest -from: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf -to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d +from: blueprint 60b55d33-5fec-4277-9864-935197eaead7 +to: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 
MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 5 -> 6): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 6 -> 7): host phase 2 contents: ------------------------------ @@ -3971,8 +4977,8 @@ to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 -+ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off -+ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off omicron zones: @@ -3980,15 +4986,15 @@ to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 - crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset - expunged ⏳ fd00:1122:3344:2::1 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 0.0.1 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 in service fd00:1122:3344:101::23 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:2::1 └─ + expunged ✓ -+ internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 ++ internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 COCKROACHDB SETTINGS: @@ -4008,8 +5014,6 @@ to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d internal DNS: * DNS zone: "control-plane.oxide.internal": -+ name: 71f71743-8c73-43c6-b080-427ec28ef4c9.host (records: 1) -+ AAAA 
fd00:1122:3344:2::1 * name: @ (records: 2 -> 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal @@ -4017,11 +5021,13 @@ internal DNS: + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal * name: _nameservice._tcp (records: 2 -> 3) -- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal -+ SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal -+ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 5353 abd27551-4027-4084-8b52-13a575b035b4.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal ++ name: abd27551-4027-4084-8b52-13a575b035b4.host (records: 1) ++ AAAA fd00:1122:3344:2::1 * name: ns2 (records: 1 -> 1) - AAAA fd00:1122:3344:3::1 + AAAA fd00:1122:3344:2::1 @@ -4037,25 +5043,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (c0d81ea6-909c-4efb-964e-beff67f6da0d) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (aa13f40f-41ff-4b68-bee1-df2e1f805544) > inventory-generate -generated inventory collection b460bcc7-664d-4dff-92fb-f250def5537c from configured sleds +generated inventory collection f7602eed-bc12-42db-8eec-6f98a05d9796 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 2, num_eligible: 0, num_ineligible: 6 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, 
part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 60b55d33-5fec-4277-9864-935197eaead7 based on parent blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d +generated blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 based on parent blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 blueprint source: planner with report: -planning report for blueprint 60b55d33-5fec-4277-9864-935197eaead7: +planning report for blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 5199c033-4cf9-4ab6-8ae7-566bd7606363 (crucible) * 15 remaining out-of-date zones @@ -4064,12 +5073,12 @@ planning report for blueprint 60b55d33-5fec-4277-9864-935197eaead7: > blueprint-diff latest -from: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d -to: blueprint 60b55d33-5fec-4277-9864-935197eaead7 +from: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 +to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 6 -> 7): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7 -> 8): host phase 2 contents: ------------------------------ @@ -4096,8 +5105,8 @@ to: blueprint 60b55d33-5fec-4277-9864-935197eaead7 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off @@ -4107,7 +5116,7 @@ to: blueprint 60b55d33-5fec-4277-9864-935197eaead7 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 @@ -4120,14 +5129,14 @@ to: blueprint 60b55d33-5fec-4277-9864-935197eaead7 zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 - install dataset in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 - artifact: version 0.0.1 in service fd00:1122:3344:101::25 └─ + artifact: version 1.0.0 @@ -4158,25 +5167,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (60b55d33-5fec-4277-9864-935197eaead7) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (316ccd9e-5c53-46c3-a2e9-20c3867b7111) > inventory-generate -generated inventory collection f8212fb6-115e-4568-a05c-b241e2e8ffb9 from configured sleds +generated inventory collection af824d9a-296d-4a2f-b704-c985c7470a1a from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, 
num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 3, num_eligible: 0, num_ineligible: 5 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 based on parent blueprint 60b55d33-5fec-4277-9864-935197eaead7 +generated blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a based on parent blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 blueprint source: planner with report: -planning report for blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544: +planning report for blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 6444f8a5-6465-4f0b-a549-1993c113569c (internal_ntp) * 14 remaining out-of-date zones @@ -4185,12 +5197,12 @@ planning report for blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544: > blueprint-diff latest -from: blueprint 60b55d33-5fec-4277-9864-935197eaead7 -to: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 +from: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 +to: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7 -> 8): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 8 -> 9): host phase 2 contents: ------------------------------ @@ -4217,8 +5229,8 @@ to: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off @@ -4228,7 +5240,7 @@ to: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 @@ -4243,13 +5255,13 @@ to: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset - in service fd00:1122:3344:101::21 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: 
version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 - in service fd00:1122:3344:101::21 └─ + expunged ⏳ @@ -4274,10 +5286,10 @@ internal DNS: - AAAA fd00:1122:3344:101::21 * name: _internal-ntp._tcp (records: 3 -> 2) - SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal +- SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal -- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal -+ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal unchanged names: 49 (records: 61) external DNS: @@ -4288,26 +5300,29 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (aa13f40f-41ff-4b68-bee1-df2e1f805544) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (02078c95-3d58-4b7b-a03f-9b160361c50a) > inventory-generate -generated inventory collection f7602eed-bc12-42db-8eec-6f98a05d9796 from configured sleds +generated inventory collection 2d5a41c5-bf7b-464c-a4b7-b14ab35982c4 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 3, num_eligible: 0, num_ineligible: 4 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, 
num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: cc6fdaf4-0195-4cef-950d-7bacd7059787 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] -generated blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 based on parent blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: e14f91b0-0c41-48a0-919d-e5078d2b89b0 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +generated blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 based on parent blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a blueprint source: planner with report: -planning report for blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111: +planning report for blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * waiting for NTP zones to appear in inventory on sleds: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * missing NTP zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 @@ -4316,12 +5331,12 @@ planning report for blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111: > blueprint-diff latest -from: blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 -to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 +from: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a +to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 8 -> 9): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 9 -> 10): host phase 2 contents: ------------------------------ @@ -4348,8 +5363,8 @@ to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off @@ -4359,13 +5374,13 @@ to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 -+ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off omicron zones: @@ -4374,15 +5389,15 @@ to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* internal_ntp 
6444f8a5-6465-4f0b-a549-1993c113569c install dataset - expunged ⏳ fd00:1122:3344:101::21 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:101::21 └─ + expunged ✓ -+ internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 ++ internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 COCKROACHDB SETTINGS: @@ -4403,12 +5418,12 @@ to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 internal DNS: * DNS zone: "control-plane.oxide.internal": * name: _internal-ntp._tcp (records: 2 -> 3) +- SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal -- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal -+ SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal ++ SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 123 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal -+ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal -+ name: cc6fdaf4-0195-4cef-950d-7bacd7059787.host (records: 1) ++ name: e14f91b0-0c41-48a0-919d-e5078d2b89b0.host (records: 1) + AAAA fd00:1122:3344:101::28 unchanged names: 49 (records: 61) @@ -4420,25 +5435,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (316ccd9e-5c53-46c3-a2e9-20c3867b7111) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (e7a01ffc-6b0e-408b-917b-b1efe18b3110) > inventory-generate -generated inventory collection af824d9a-296d-4a2f-b704-c985c7470a1a from configured sleds +generated inventory collection 94c793da-83b2-4fdf-b085-d7ef476bf204 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 4, num_eligible: 0, num_ineligible: 4 +INFO performed noop zone image source 
checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a based on parent blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 +generated blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 based on parent blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 blueprint source: planner with report: -planning report for blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a: +planning report for blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 803bfb63-c246-41db-b0da-d3b87ddfc63d (external_dns) * 13 remaining out-of-date zones @@ -4447,12 +5465,12 @@ planning report for blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a: > blueprint-diff latest -from: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 -to: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a +from: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 +to: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 9 -> 10): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 10 -> 11): host phase 2 contents: ------------------------------ @@ -4478,8 +5496,8 @@ to: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off @@ -4488,10 +5506,10 @@ to: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 @@ -4507,14 +5525,14 @@ to: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 - internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset - in service fd00:1122:3344:101::23 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 
0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 - in service fd00:1122:3344:101::23 └─ + expunged ⏳ @@ -4538,10 +5556,10 @@ internal DNS: - name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) - AAAA fd00:1122:3344:101::23 * name: _external-dns._tcp (records: 3 -> 2) +- SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal - SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal -- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal -+ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal unchanged names: 49 (records: 61) @@ -4564,25 +5582,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (02078c95-3d58-4b7b-a03f-9b160361c50a) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (880e2ffc-8187-4275-a2f3-1b36aa2f4482) > inventory-generate -generated inventory collection 2d5a41c5-bf7b-464c-a4b7-b14ab35982c4 from configured sleds +generated inventory collection 388a8c73-4ec0-4a23-9f82-225e652d8f37 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 4, num_eligible: 0, num_ineligible: 3 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, 
num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 based on parent blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a +generated blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec based on parent blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 blueprint source: planner with report: -planning report for blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110: +planning report for blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * discretionary zones placed: * external_dns zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -4591,12 +5612,12 @@ planning report for blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110: > blueprint-diff latest -from: blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a -to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 +from: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 +to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 10 -> 11): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 11 -> 12): host phase 2 contents: ------------------------------ @@ -4623,8 +5644,8 @@ to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off @@ -4634,15 +5655,15 @@ to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 -+ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off -+ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off omicron zones: @@ -4651,16 +5672,16 @@ to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 - internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service 
fd00:1122:3344:101::22 -* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset - expunged ⏳ fd00:1122:3344:101::23 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 in service fd00:1122:3344:101::24 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:101::23 └─ + expunged ✓ -+ external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 ++ external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 COCKROACHDB SETTINGS: @@ -4680,14 +5701,14 @@ to: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 internal DNS: * DNS zone: "control-plane.oxide.internal": ++ name: 26bdd109-c842-43a9-95cb-15aba9b0832b.host (records: 1) ++ AAAA fd00:1122:3344:101::29 * name: _external-dns._tcp (records: 2 -> 3) -- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal +- SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal -+ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal -+ SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal ++ SRV port 5353 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal ++ SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal -+ name: e14f91b0-0c41-48a0-919d-e5078d2b89b0.host (records: 1) -+ AAAA fd00:1122:3344:101::29 unchanged names: 49 (records: 61) external DNS: @@ -4709,25 +5730,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (e7a01ffc-6b0e-408b-917b-b1efe18b3110) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (c4a20bcb-1a71-4e88-97b4-36d16f55daec) > inventory-generate -generated inventory collection 94c793da-83b2-4fdf-b085-d7ef476bf204 from configured sleds +generated inventory collection 2d608b8f-bf88-4707-ac27-6be62f3d5146 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO 
performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 5, num_eligible: 0, num_ineligible: 3 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 based on parent blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 +generated blueprint a2c6496d-98fc-444d-aa36-99508aa72367 based on parent blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec blueprint source: planner with report: -planning report for blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482: +planning report for blueprint a2c6496d-98fc-444d-aa36-99508aa72367: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (crucible_pantry) * 12 remaining out-of-date zones @@ -4736,12 +5760,12 @@ planning report for blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482: > blueprint-diff latest -from: blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 -to: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 +from: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec +to: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 11 -> 12): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 12 -> 13): host phase 2 contents: ------------------------------ @@ -4767,23 +5791,23 @@ to: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 @@ -4797,15 +5821,15 @@ to: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 
-------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset expunged ✓ fd00:1122:3344:101::23 - external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 - internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset - in service fd00:1122:3344:101::24 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 - in service fd00:1122:3344:101::24 └─ + expunged ⏳ @@ -4827,10 +5851,10 @@ to: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 internal DNS: * DNS zone: "control-plane.oxide.internal": * name: _crucible-pantry._tcp (records: 3 -> 2) -- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal -+ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 @@ -4844,25 +5868,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (880e2ffc-8187-4275-a2f3-1b36aa2f4482) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (a2c6496d-98fc-444d-aa36-99508aa72367) > inventory-generate -generated inventory 
collection 388a8c73-4ec0-4a23-9f82-225e652d8f37 from configured sleds +generated inventory collection 21e24074-fdd0-438e-a4e7-11665b7071bb from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 5, num_eligible: 0, num_ineligible: 2 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec based on parent blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 +generated blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 based on parent blueprint a2c6496d-98fc-444d-aa36-99508aa72367 blueprint source: planner with report: -planning report for blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec: +planning report for blueprint 6ed56354-5941-40d1-a06c-b0e940701d52: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * discretionary zones placed: * crucible_pantry zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -4871,12 +5898,12 @@ planning report for blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec: > blueprint-diff latest -from: blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 -to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec +from: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 +to: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 MODIFIED SLEDS: - 
sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 12 -> 13): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 13 -> 14): host phase 2 contents: ------------------------------ @@ -4902,10 +5929,10 @@ to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off @@ -4913,17 +5940,17 @@ to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 
3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 -+ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off ++ oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_7e83b92d-6b02-47f3-a5ab-125a6bb44e29 7562b5c7-041c-44e8-9ae7-e453b63f5b03 in service none none off omicron zones: @@ -4932,17 +5959,17 @@ to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset expunged ✓ fd00:1122:3344:101::23 - external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 - internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset - expunged ⏳ fd00:1122:3344:101::24 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 0.0.1 in service fd00:1122:3344:101::26 + external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:101::24 └─ + expunged ✓ -+ crucible_pantry 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::2a 
++ crucible_pantry 7e83b92d-6b02-47f3-a5ab-125a6bb44e29 artifact: version 1.0.0 in service fd00:1122:3344:101::2a COCKROACHDB SETTINGS: @@ -4962,14 +5989,14 @@ to: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec internal DNS: * DNS zone: "control-plane.oxide.internal": -+ name: 26bdd109-c842-43a9-95cb-15aba9b0832b.host (records: 1) ++ name: 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host (records: 1) + AAAA fd00:1122:3344:101::2a * name: _crucible-pantry._tcp (records: 2 -> 3) -- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal -+ SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal -+ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal ++ SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal unchanged names: 49 (records: 61) external DNS: @@ -4980,25 +6007,28 @@ external DNS: > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (c4a20bcb-1a71-4e88-97b4-36d16f55daec) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (6ed56354-5941-40d1-a06c-b0e940701d52) > inventory-generate -generated inventory collection 2d608b8f-bf88-4707-ac27-6be62f3d5146 from configured sleds +generated inventory collection ed8ea2c4-4271-407e-9c84-54129418d171 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 6, num_eligible: 0, num_ineligible: 2 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a 
INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint a2c6496d-98fc-444d-aa36-99508aa72367 based on parent blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec +generated blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 based on parent blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 blueprint source: planner with report: -planning report for blueprint a2c6496d-98fc-444d-aa36-99508aa72367: +planning report for blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone dfac80b4-a887-430a-ae87-a4e065dba787 (crucible) * 11 remaining out-of-date zones @@ -5007,12 +6037,12 @@ planning report for blueprint a2c6496d-98fc-444d-aa36-99508aa72367: > blueprint-diff latest -from: blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec -to: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 +from: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 +to: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 13 -> 14): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 14 -> 15): host phase 2 contents: ------------------------------ @@ -5038,25 +6068,25 @@ to: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_7e83b92d-6b02-47f3-a5ab-125a6bb44e29 7562b5c7-041c-44e8-9ae7-e453b63f5b03 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 @@ -5068,16 +6098,16 @@ to: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 -------------------------------------------------------------------------------------------------------------------------- crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 - crucible_pantry 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::2a - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset expunged ✓ fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset 
expunged ✓ fd00:1122:3344:101::23 - external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 - internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -* crucible dfac80b4-a887-430a-ae87-a4e065dba787 - install dataset in service fd00:1122:3344:101::26 + crucible_pantry 7e83b92d-6b02-47f3-a5ab-125a6bb44e29 artifact: version 1.0.0 in service fd00:1122:3344:101::2a + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::24 + external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 +* crucible dfac80b4-a887-430a-ae87-a4e065dba787 - artifact: version 0.0.1 in service fd00:1122:3344:101::26 └─ + artifact: version 1.0.0 @@ -5111,25 +6141,28 @@ external DNS: > # after everything else, so the next step should update the first zone on the > # next sled. 
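> # Each of the remaining update steps repeats the same cycle seen above: sled-set <sled>
> # omicron-config latest pushes the newest blueprint's config to that sled,
> # inventory-generate collects inventory from the configured sleds, blueprint-plan
> # latest latest plans the next blueprint from that blueprint plus inventory, and
> # blueprint-diff latest shows what changed relative to the parent blueprint.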
> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (a2c6496d-98fc-444d-aa36-99508aa72367) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (9078c4ba-3a73-4b3f-ac2c-acb501f89cb2) > inventory-generate -generated inventory collection 21e24074-fdd0-438e-a4e7-11665b7071bb from configured sleds +generated inventory collection 336dbc73-f973-4962-a210-3c9d424bd6a3 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 based on parent blueprint a2c6496d-98fc-444d-aa36-99508aa72367 +generated blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 based on parent blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 blueprint source: planner with report: -planning report for blueprint 6ed56354-5941-40d1-a06c-b0e940701d52: +planning report for blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 694bd14f-cb24-4be4-bb19-876e79cda2c8 (crucible) * 10 remaining out-of-date 
zones @@ -5138,12 +6171,12 @@ planning report for blueprint 6ed56354-5941-40d1-a06c-b0e940701d52: > blueprint-diff latest -from: blueprint a2c6496d-98fc-444d-aa36-99508aa72367 -to: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 +from: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 +to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 3 -> 4): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 4 -> 5): host phase 2 contents: ------------------------------ @@ -5191,14 +6224,14 @@ to: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 ------------------------------------------------------------------------------------------------------------------------- zone type zone id image source disposition underlay IP ------------------------------------------------------------------------------------------------------------------------- - crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 - install dataset in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 0.0.1 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 - artifact: version 0.0.1 in service fd00:1122:3344:103::26 └─ + artifact: version 1.0.0 @@ -5230,25 +6263,28 @@ external DNS: > # Step through updates of all the non-Nexus zones on this sled. 
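> # As on the previous sled, crucible zones are updated in place (same zone id, image
> # source moving from the version 0.0.1 artifact to the version 1.0.0 artifact), while
> # crucible_pantry and the DNS/NTP zones are expunged and replaced by new zones with
> # fresh ids, underlay addresses, and DNS records.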
> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (6ed56354-5941-40d1-a06c-b0e940701d52) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (8763abc1-8a42-4932-b5a7-33109e0e0152) > inventory-generate -generated inventory collection ed8ea2c4-4271-407e-9c84-54129418d171 from configured sleds +generated inventory collection 897721fc-b087-41be-a566-809d59c8aeea from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 1, num_eligible: 0, num_ineligible: 7 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 based on parent blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 +generated blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 based on parent blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 blueprint source: planner with report: -planning report for blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2: +planning report for blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (crucible_pantry) * 9 remaining out-of-date zones 
@@ -5257,12 +6293,12 @@ planning report for blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2: > blueprint-diff latest -from: blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 -to: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 +from: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 +to: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 4 -> 5): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 5 -> 6): host phase 2 contents: ------------------------------ @@ -5312,13 +6348,13 @@ to: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 - crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset - in service fd00:1122:3344:103::24 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 0.0.1 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 - in service fd00:1122:3344:103::24 └─ + expunged ⏳ @@ -5342,11 +6378,11 @@ internal DNS: - name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) - AAAA fd00:1122:3344:103::24 * name: _crucible-pantry._tcp (records: 3 -> 2) -- SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal -- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal - SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal -+ SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal -+ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal ++ SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal ++ SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal unchanged names: 49 (records: 61) external DNS: @@ -5357,25 +6393,28 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled 
d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (9078c4ba-3a73-4b3f-ac2c-acb501f89cb2) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (2b89e0d7-f15b-4474-8ac4-85959ed1bc88) > inventory-generate -generated inventory collection 336dbc73-f973-4962-a210-3c9d424bd6a3 from configured sleds +generated inventory collection 5d0b9686-48df-4642-a39c-e2dea04d5330 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 1, num_eligible: 0, num_ineligible: 6 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 based on parent blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 +generated blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 based on parent blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 blueprint source: planner with report: -planning report for blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152: +planning report for blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts * discretionary zones placed: * crucible_pantry zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -5384,12 +6423,12 @@ planning report for blueprint 
8763abc1-8a42-4932-b5a7-33109e0e0152: > blueprint-diff latest -from: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 -to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 +from: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 +to: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 5 -> 6): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 6 -> 7): host phase 2 contents: ------------------------------ @@ -5431,7 +6470,7 @@ to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 -+ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off omicron zones: @@ -5439,15 +6478,15 @@ to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 - crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset - expunged ⏳ fd00:1122:3344:103::24 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 0.0.1 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:103::24 └─ + expunged ✓ -+ crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 ++ crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 COCKROACHDB SETTINGS: @@ -5467,14 +6506,14 @@ to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 internal DNS: * DNS zone: "control-plane.oxide.internal": -* name: _crucible-pantry._tcp 
(records: 2 -> 3) -- SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal -- SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal -+ SRV port 17000 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal -+ SRV port 17000 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal -+ SRV port 17000 c88fcd7d-9509-470e-8c4f-3e6f09104cdc.host.control-plane.oxide.internal -+ name: c88fcd7d-9509-470e-8c4f-3e6f09104cdc.host (records: 1) ++ name: 9464c6ed-ffa6-4e88-ae4e-76551d82b2af.host (records: 1) + AAAA fd00:1122:3344:103::28 +* name: _crucible-pantry._tcp (records: 2 -> 3) +- SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal +- SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal ++ SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal ++ SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal ++ SRV port 17000 9464c6ed-ffa6-4e88-ae4e-76551d82b2af.host.control-plane.oxide.internal unchanged names: 49 (records: 61) external DNS: @@ -5485,25 +6524,28 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (8763abc1-8a42-4932-b5a7-33109e0e0152) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4) > inventory-generate -generated inventory collection 897721fc-b087-41be-a566-809d59c8aeea from configured sleds +generated inventory collection 90f0f757-fd33-4744-abee-36616a645b87 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 2, num_eligible: 0, num_ineligible: 6 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, 
part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 based on parent blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 +generated blueprint 59630e63-c953-4807-9e84-9e750a79f68e based on parent blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 blueprint source: planner with report: -planning report for blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88: +planning report for blueprint 59630e63-c953-4807-9e84-9e750a79f68e: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 7c252b64-c5af-4ec1-989e-9a03f3b0f111 (crucible) * 8 remaining out-of-date zones @@ -5512,12 +6554,12 @@ planning report for blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88: > blueprint-diff latest -from: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 -to: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 +from: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 +to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 6 -> 7): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 7 -> 8): host phase 2 contents: ------------------------------ @@ -5552,7 +6594,7 @@ to: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off @@ -5567,14 +6609,14 @@ to: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 zone type zone id image source disposition underlay IP -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 - crucible 
f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 - install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 - artifact: version 0.0.1 in service fd00:1122:3344:103::27 └─ + artifact: version 1.0.0 @@ -5605,25 +6647,28 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (2b89e0d7-f15b-4474-8ac4-85959ed1bc88) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (59630e63-c953-4807-9e84-9e750a79f68e) > inventory-generate -generated inventory collection 5d0b9686-48df-4642-a39c-e2dea04d5330 from configured sleds +generated inventory collection ee9bc64a-70f7-4d81-b39d-a709754ce118 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, 
num_total: 8, num_already_artifact: 3, num_eligible: 0, num_ineligible: 5 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 based on parent blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 +generated blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 based on parent blueprint 59630e63-c953-4807-9e84-9e750a79f68e blueprint source: planner with report: -planning report for blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4: +planning report for blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (internal_dns) * 7 remaining out-of-date zones @@ -5632,12 +6677,12 @@ planning report for blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4: > blueprint-diff latest -from: blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 -to: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 +from: blueprint 59630e63-c953-4807-9e84-9e750a79f68e +to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 7 -> 8): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 8 -> 9): host phase 2 contents: ------------------------------ @@ -5671,7 +6716,7 @@ to: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off @@ -5690,13 +6735,13 @@ to: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset - in service fd00:1122:3344:3::1 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 - in service fd00:1122:3344:3::1 └─ + expunged ⏳ @@ -5724,11 +6769,11 @@ internal DNS: + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal * name: _nameservice._tcp (records: 3 -> 2) -- SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal -- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 5353 abd27551-4027-4084-8b52-13a575b035b4.host.control-plane.oxide.internal - SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal -+ SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal -+ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal ++ SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 5353 abd27551-4027-4084-8b52-13a575b035b4.host.control-plane.oxide.internal - name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) - AAAA fd00:1122:3344:3::1 - name: ns3 (records: 1) @@ -5743,25 +6788,28 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (e93650dc-b5ba-4ec7-8550-9171c1ada194) > inventory-generate -generated inventory collection 
90f0f757-fd33-4744-abee-36616a645b87 from configured sleds +generated inventory collection 3dc9d8c8-8f50-4d6e-9396-97058d1d2722 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 3, num_eligible: 0, num_ineligible: 4 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 59630e63-c953-4807-9e84-9e750a79f68e based on parent blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 +generated blueprint 90650737-8142-47a6-9a48-a10efc487e57 based on parent blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 blueprint source: planner with report: -planning report for blueprint 59630e63-c953-4807-9e84-9e750a79f68e: +planning report for blueprint 90650737-8142-47a6-9a48-a10efc487e57: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts * discretionary zones placed: * internal_dns zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 * zone updates waiting on discretionary zones @@ -5770,12 +6818,12 @@ planning report for blueprint 59630e63-c953-4807-9e84-9e750a79f68e: > blueprint-diff latest -from: blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 -to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e +from: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 +to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 MODIFIED SLEDS: - sled 
d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 8 -> 9): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 9 -> 10): host phase 2 contents: ------------------------------ @@ -5810,7 +6858,7 @@ to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off @@ -5818,8 +6866,8 @@ to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 -+ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off -+ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off omicron zones: @@ -5828,15 +6876,15 @@ to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset 
in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset - expunged ⏳ fd00:1122:3344:3::1 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:3::1 └─ + expunged ✓ -+ internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 ++ internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 COCKROACHDB SETTINGS: @@ -5856,8 +6904,6 @@ to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e internal DNS: * DNS zone: "control-plane.oxide.internal": -+ name: 7fbd0103-d7f8-48a5-b95e-29bf812cac1f.host (records: 1) -+ AAAA fd00:1122:3344:3::1 * name: @ (records: 2 -> 3) - NS ns1.control-plane.oxide.internal - NS ns2.control-plane.oxide.internal @@ -5865,11 +6911,13 @@ internal DNS: + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal * name: _nameservice._tcp (records: 2 -> 3) -- SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal -- SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal -+ SRV port 5353 71f71743-8c73-43c6-b080-427ec28ef4c9.host.control-plane.oxide.internal -+ SRV port 5353 7fbd0103-d7f8-48a5-b95e-29bf812cac1f.host.control-plane.oxide.internal -+ SRV port 5353 c821c39d-2b2c-4c55-8874-ac12315ba1e4.host.control-plane.oxide.internal +- SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal +- SRV port 5353 abd27551-4027-4084-8b52-13a575b035b4.host.control-plane.oxide.internal ++ SRV port 5353 698d1d82-0620-4978-93ac-0ba5d40f3da9.host.control-plane.oxide.internal ++ SRV port 5353 abd27551-4027-4084-8b52-13a575b035b4.host.control-plane.oxide.internal ++ SRV port 5353 d5fd048a-8786-42d3-938e-820eae95d7f4.host.control-plane.oxide.internal ++ name: d5fd048a-8786-42d3-938e-820eae95d7f4.host (records: 1) ++ AAAA fd00:1122:3344:3::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 unchanged names: 47 (records: 57) @@ -5882,25 +6930,28 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (59630e63-c953-4807-9e84-9e750a79f68e) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (90650737-8142-47a6-9a48-a10efc487e57) > inventory-generate -generated inventory collection ee9bc64a-70f7-4d81-b39d-a709754ce118 from configured sleds +generated inventory collection a4dab274-0fff-47fa-bc22-b98d11ec54d2 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 
1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 4, num_eligible: 0, num_ineligible: 4 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 based on parent blueprint 59630e63-c953-4807-9e84-9e750a79f68e +generated blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa based on parent blueprint 90650737-8142-47a6-9a48-a10efc487e57 blueprint source: planner with report: -planning report for blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194: +planning report for blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (internal_ntp) * 6 remaining out-of-date zones @@ -5909,12 +6960,12 @@ planning report for blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194: > blueprint-diff latest -from: blueprint 59630e63-c953-4807-9e84-9e750a79f68e -to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 +from: blueprint 90650737-8142-47a6-9a48-a10efc487e57 +to: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 9 -> 10): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 10 -> 11): host phase 2 contents: ------------------------------ @@ -5942,7 +6993,7 @@ to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in 
service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off @@ -5950,9 +7001,9 @@ to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 @@ -5968,14 +7019,14 @@ to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - 
internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset - in service fd00:1122:3344:103::21 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 - in service fd00:1122:3344:103::21 └─ + expunged ⏳ @@ -5997,11 +7048,11 @@ to: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 internal DNS: * DNS zone: "control-plane.oxide.internal": * name: _internal-ntp._tcp (records: 3 -> 2) -- SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal +- SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal +- SRV port 123 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal -- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal -+ SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal -+ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal ++ SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 123 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 unchanged names: 49 (records: 61) @@ -6014,26 +7065,29 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (e93650dc-b5ba-4ec7-8550-9171c1ada194) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (2182613d-dc9f-41eb-9c6a-d33801849caa) > inventory-generate -generated inventory collection 3dc9d8c8-8f50-4d6e-9396-97058d1d2722 from configured sleds +generated inventory collection d483be68-4bf3-4133-aed1-661cba8e1194 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO 
performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 4, num_eligible: 0, num_ineligible: 3 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: d5fd048a-8786-42d3-938e-820eae95d7f4 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] -generated blueprint 90650737-8142-47a6-9a48-a10efc487e57 based on parent blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: 9e2e0774-3cf6-4f75-9a12-92db05c77b81 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] +generated blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece based on parent blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa blueprint source: planner with report: -planning report for blueprint 90650737-8142-47a6-9a48-a10efc487e57: +planning report for blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts * waiting for NTP zones to appear in inventory on sleds: d81c6a84-79b8-4958-ae41-ea46c9b19763 * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: d81c6a84-79b8-4958-ae41-ea46c9b19763 * missing NTP zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 @@ -6042,12 +7096,12 @@ planning report for blueprint 90650737-8142-47a6-9a48-a10efc487e57: > blueprint-diff latest -from: blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 -to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 +from: 
blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa +to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 10 -> 11): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 11 -> 12): host phase 2 contents: ------------------------------ @@ -6075,7 +7129,7 @@ to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off @@ -6083,16 +7137,16 @@ to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 
21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 -+ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off omicron zones: @@ -6101,16 +7155,16 @@ to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset - expunged ⏳ fd00:1122:3344:103::21 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 0.0.1 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:103::21 └─ + expunged ✓ -+ internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 ++ internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 COCKROACHDB SETTINGS: @@ -6130,14 +7184,14 @@ to: blueprint 90650737-8142-47a6-9a48-a10efc487e57 internal DNS: * DNS zone: "control-plane.oxide.internal": -* name: _internal-ntp._tcp (records: 2 -> 3) -- SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal -- SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal -+ SRV port 123 cc6fdaf4-0195-4cef-950d-7bacd7059787.host.control-plane.oxide.internal -+ SRV port 123 d5fd048a-8786-42d3-938e-820eae95d7f4.host.control-plane.oxide.internal -+ SRV port 123 f83ade6d-9ab1-4679-813b-b9457e039c0b.host.control-plane.oxide.internal -+ name: d5fd048a-8786-42d3-938e-820eae95d7f4.host (records: 1) ++ name: 9e2e0774-3cf6-4f75-9a12-92db05c77b81.host (records: 1) + AAAA fd00:1122:3344:103::29 +* name: 
_internal-ntp._tcp (records: 2 -> 3) +- SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal +- SRV port 123 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal ++ SRV port 123 9e2e0774-3cf6-4f75-9a12-92db05c77b81.host.control-plane.oxide.internal ++ SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal ++ SRV port 123 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal unchanged names: 49 (records: 61) external DNS: @@ -6148,25 +7202,28 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (90650737-8142-47a6-9a48-a10efc487e57) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (e8b088a8-7da0-480b-a2dc-75ffef068ece) > inventory-generate -generated inventory collection a4dab274-0fff-47fa-bc22-b98d11ec54d2 from configured sleds +generated inventory collection 74b742c1-01da-4461-a011-785e2e11a5b2 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 5, num_eligible: 0, num_ineligible: 3 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa based on parent blueprint 90650737-8142-47a6-9a48-a10efc487e57 +generated blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 based on parent blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece blueprint source: planner with report: -planning report for blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa: +planning report for blueprint 
810ea95a-4730-43dd-867e-1984aeb9d873: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f55647d4-5500-4ad3-893a-df45bd50d622 (crucible) * 5 remaining out-of-date zones @@ -6175,12 +7232,12 @@ planning report for blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa: > blueprint-diff latest -from: blueprint 90650737-8142-47a6-9a48-a10efc487e57 -to: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa +from: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece +to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 11 -> 12): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 12 -> 13): host phase 2 contents: ------------------------------ @@ -6208,7 +7265,7 @@ to: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off @@ -6216,12 +7273,12 @@ to: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 
c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 @@ -6234,15 +7291,15 @@ to: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa -------------------------------------------------------------------------------------------------------------------------- crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 - internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* crucible f55647d4-5500-4ad3-893a-df45bd50d622 - install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 in service fd00:1122:3344:103::23 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* crucible f55647d4-5500-4ad3-893a-df45bd50d622 - artifact: version 0.0.1 in service fd00:1122:3344:103::25 └─ + artifact: version 1.0.0 @@ -6273,25 +7330,28 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 
omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (2182613d-dc9f-41eb-9c6a-d33801849caa) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (810ea95a-4730-43dd-867e-1984aeb9d873) > inventory-generate -generated inventory collection d483be68-4bf3-4133-aed1-661cba8e1194 from configured sleds +generated inventory collection 78486156-ea1a-42a2-adc3-658ccd94ccd1 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 6, num_eligible: 0, num_ineligible: 2 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece based on parent blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa +generated blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d based on parent blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 blueprint source: planner with report: -planning report for blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece: +planning report for blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (external_dns) * 4 remaining out-of-date zones @@ -6300,12 +7360,12 @@ planning report for 
blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece: > blueprint-diff latest -from: blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa -to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece +from: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 +to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 12 -> 13): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 13 -> 14): host phase 2 contents: ------------------------------ @@ -6331,7 +7391,7 @@ to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off @@ -6340,11 +7400,11 @@ to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 
c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 @@ -6362,14 +7422,14 @@ to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 - internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset - in service fd00:1122:3344:103::23 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 - in service fd00:1122:3344:103::23 └─ + expunged ⏳ @@ -6391,11 +7451,11 @@ to: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece internal DNS: * DNS zone: "control-plane.oxide.internal": * name: _external-dns._tcp (records: 3 -> 2) -- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal -- SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal +- SRV port 5353 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal +- SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal -+ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal -+ SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal ++ SRV port 5353 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal ++ SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 unchanged names: 49 (records: 61) @@ -6416,25 +7476,28 @@ external DNS: > 
sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (e8b088a8-7da0-480b-a2dc-75ffef068ece) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (67c074ef-c52e-4ff1-851a-598c04dc2c8d) > inventory-generate -generated inventory collection 74b742c1-01da-4461-a011-785e2e11a5b2 from configured sleds +generated inventory collection 0f80a0f2-360f-4c64-9c35-3dc067dd2620 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 6, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 7, num_already_artifact: 7, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 based on parent blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece +generated blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e based on parent blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d blueprint source: planner with report: -planning report for blueprint 810ea95a-4730-43dd-867e-1984aeb9d873: +planning report for blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts * discretionary zones placed: * external_dns zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 * zone updates waiting on 
discretionary zones @@ -6443,12 +7506,12 @@ planning report for blueprint 810ea95a-4730-43dd-867e-1984aeb9d873: > blueprint-diff latest -from: blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece -to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 +from: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d +to: blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 13 -> 14): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 14 -> 15): host phase 2 contents: ------------------------------ @@ -6476,7 +7539,7 @@ to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off @@ -6484,18 +7547,18 @@ to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 -+ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 850d04b8-c706-46e9-b405-a7a800b744b5 in service none none off -+ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 b24bee8e-82a0-4b4d-a57c-77a1010f3e38 in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 0f0c883a-7a84-4064-b085-0af035edfb3d in service none none off ++ oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 bdadb3c9-f786-4fcd-a632-667a2e359065 in service none none off omicron zones: @@ -6505,16 +7568,16 @@ to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 - internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset - expunged ⏳ fd00:1122:3344:103::23 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 +* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:103::23 └─ + expunged ✓ -+ external_dns f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 artifact: version 1.0.0 in service fd00:1122:3344:103::2a ++ external_dns 
63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 artifact: version 1.0.0 in service fd00:1122:3344:103::2a COCKROACHDB SETTINGS: @@ -6534,14 +7597,14 @@ to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 internal DNS: * DNS zone: "control-plane.oxide.internal": -* name: _external-dns._tcp (records: 2 -> 3) -- SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal -- SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal -+ SRV port 5353 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal -+ SRV port 5353 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal -+ SRV port 5353 f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01.host.control-plane.oxide.internal -+ name: f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01.host (records: 1) ++ name: 63ff80e3-ef8d-4186-8240-5ebd3f7d4d82.host (records: 1) + AAAA fd00:1122:3344:103::2a +* name: _external-dns._tcp (records: 2 -> 3) +- SRV port 5353 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal +- SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal ++ SRV port 5353 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal ++ SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal ++ SRV port 5353 63ff80e3-ef8d-4186-8240-5ebd3f7d4d82.host.control-plane.oxide.internal unchanged names: 49 (records: 61) external DNS: @@ -6560,27 +7623,30 @@ external DNS: > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (810ea95a-4730-43dd-867e-1984aeb9d873) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (0a8d2f87-1d3e-4296-ba07-108940a7a57e) > inventory-generate -generated inventory collection 78486156-ea1a-42a2-adc3-658ccd94ccd1 from configured sleds +generated inventory collection c0548a65-0ee4-4876-a5e0-3384187c88cc from configured sleds > # Add Nexus zones on three sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 7, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, 
num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d based on parent blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 +generated blueprint d69e1109-06be-4469-8876-4292dc7885d7 based on parent blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e blueprint source: planner with report: -planning report for blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d: +planning report for blueprint d69e1109-06be-4469-8876-4292dc7885d7: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * discretionary zones placed: * nexus zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 * nexus zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 @@ -6591,12 +7657,12 @@ planning report for blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d: > blueprint-diff latest -from: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 -to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d +from: blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e +to: blueprint d69e1109-06be-4469-8876-4292dc7885d7 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 15 -> 16): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 16 -> 17): host phase 2 contents: ------------------------------ @@ -6623,9 +7689,9 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 9f185688-315b-445a-824a-84ff526417f2 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 5c01fdbd-ff37-44b4-a17b-6d625e6fa48d in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off @@ -6634,19 +7700,19 @@ to: blueprint 
67c074ef-c52e-4ff1-851a-598c04dc2c8d oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_156bfcde-e3fa-4abe-a93e-eb4a408b4e5e 7f2ba73c-1f57-4f92-8059-738059808061 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_c821c39d-2b2c-4c55-8874-ac12315ba1e4 a21812e1-1b80-4faa-9f2b-51189e0f6999 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_f83ade6d-9ab1-4679-813b-b9457e039c0b 7642e649-44d6-49b0-ba7c-cc3f6b6f2f9c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 -+ oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_c11077d3-926e-479e-9f4e-299f6cf3bf29 5bb9ab35-13d4-4341-9548-855ce40a1269 in service none none off ++ oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_9ae90740-7fdb-4073-ae43-048f2fca3d69 91f7f766-931f-43ef-99b4-2399f58c946b in service none none off omicron zones: @@ -6657,19 +7723,19 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in 
service fd00:1122:3344:102::28 crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 1.0.0 in service fd00:1122:3344:102::26 crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 1.0.0 in service fd00:1122:3344:102::27 - crucible_pantry 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:102::2b - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset expunged ✓ fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset expunged ✓ fd00:1122:3344:102::24 - external_dns ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::2a - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset expunged ✓ fd00:1122:3344:1::1 - internal_dns c821c39d-2b2c-4c55-8874-ac12315ba1e4 artifact: version 1.0.0 in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset expunged ✓ fd00:1122:3344:102::21 - internal_ntp f83ade6d-9ab1-4679-813b-b9457e039c0b artifact: version 1.0.0 in service fd00:1122:3344:102::29 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -+ nexus c11077d3-926e-479e-9f4e-299f6cf3bf29 artifact: version 1.0.0 in service fd00:1122:3344:102::2c + crucible_pantry 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 in service fd00:1122:3344:102::22 ++ nexus 9ae90740-7fdb-4073-ae43-048f2fca3d69 artifact: version 1.0.0 in service fd00:1122:3344:102::2c - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 14 -> 15): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 15 -> 16): host phase 2 contents: ------------------------------ @@ -6695,29 +7761,29 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 44811c39-a4a2-4be3-85a6-954cf148e4b2 in service none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns dedff151-ee94-4ead-a742-f973b39e21db in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 
ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_7e83b92d-6b02-47f3-a5ab-125a6bb44e29 7562b5c7-041c-44e8-9ae7-e453b63f5b03 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_71f71743-8c73-43c6-b080-427ec28ef4c9 d8368353-9ae6-4ed0-99ad-21783f514ba6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off - oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_cc6fdaf4-0195-4cef-950d-7bacd7059787 5d6116c2-ade3-4246-8d02-5591d2622c10 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 -+ 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_923d968a-7d98-429b-8026-656d2e72af6a bc7fdd15-1a0a-4d95-a03a-112c027a5aca in service none none off ++ oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_d516d61b-fd96-46ad-a743-78eec814ee90 d2707e9c-d793-4a6c-8f77-2d3aa5a98390 in service none none off omicron zones: @@ -6727,19 +7793,19 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 1.0.0 in service fd00:1122:3344:101::26 - crucible_pantry 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::2a - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset expunged ✓ fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset expunged ✓ fd00:1122:3344:101::23 - external_dns e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::29 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset expunged ✓ fd00:1122:3344:2::1 - internal_dns 71f71743-8c73-43c6-b080-427ec28ef4c9 artifact: version 1.0.0 in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset expunged ✓ fd00:1122:3344:101::21 - internal_ntp cc6fdaf4-0195-4cef-950d-7bacd7059787 artifact: version 1.0.0 in service fd00:1122:3344:101::28 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 -+ nexus 923d968a-7d98-429b-8026-656d2e72af6a artifact: version 1.0.0 in service fd00:1122:3344:101::2b + crucible_pantry 7e83b92d-6b02-47f3-a5ab-125a6bb44e29 artifact: version 1.0.0 in service fd00:1122:3344:101::2a + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::24 + external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 in service fd00:1122:3344:101::22 ++ nexus d516d61b-fd96-46ad-a743-78eec814ee90 artifact: version 1.0.0 in service fd00:1122:3344:101::2b - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 14 -> 15): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 15 -> 16): host phase 2 contents: ------------------------------ @@ -6766,8 +7832,8 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off - 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 850d04b8-c706-46e9-b405-a7a800b744b5 in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns bdc35fc8-8541-4dfc-b9fa-db05eceb5c55 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 0f0c883a-7a84-4064-b085-0af035edfb3d in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off @@ -6776,18 +7842,18 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_c88fcd7d-9509-470e-8c4f-3e6f09104cdc fed6b06e-ad3f-41c2-b7cd-e783462ec58c in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 b24bee8e-82a0-4b4d-a57c-77a1010f3e38 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 bdadb3c9-f786-4fcd-a632-667a2e359065 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f expunged none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_7fbd0103-d7f8-48a5-b95e-29bf812cac1f 49336223-f6df-4fe7-bd9f-95123c5622a9 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off - oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 
oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 -+ oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_3e2f206f-61d2-4dee-91e6-8fbcaf251df6 d51d6234-591b-49ee-9115-a44eec86d42d in service none none off ++ oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad ecb27354-877c-4a24-8982-1298a222ca1c in service none none off omicron zones: @@ -6797,16 +7863,16 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset expunged ✓ fd00:1122:3344:103::24 - crucible_pantry c88fcd7d-9509-470e-8c4f-3e6f09104cdc artifact: version 1.0.0 in service fd00:1122:3344:103::28 - external_dns f4d7ec7b-5e5c-4c90-97f2-2ac9d4588a01 artifact: version 1.0.0 in service fd00:1122:3344:103::2a - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset expunged ✓ fd00:1122:3344:103::23 - internal_dns 7fbd0103-d7f8-48a5-b95e-29bf812cac1f artifact: version 1.0.0 in service fd00:1122:3344:3::1 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset expunged ✓ fd00:1122:3344:3::1 - internal_ntp d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:103::29 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset expunged ✓ fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 -+ nexus 3e2f206f-61d2-4dee-91e6-8fbcaf251df6 artifact: version 1.0.0 in service fd00:1122:3344:103::2b + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns 63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 artifact: version 1.0.0 in service fd00:1122:3344:103::2a + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::23 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 in service fd00:1122:3344:103::22 ++ nexus 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad artifact: version 1.0.0 in service fd00:1122:3344:103::2b COCKROACHDB SETTINGS: @@ -6826,22 +7892,22 @@ to: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d internal DNS: * DNS zone: "control-plane.oxide.internal": -+ name: 3e2f206f-61d2-4dee-91e6-8fbcaf251df6.host (records: 1) ++ name: 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host (records: 1) + AAAA fd00:1122:3344:103::2b -+ name: 923d968a-7d98-429b-8026-656d2e72af6a.host (records: 1) -+ AAAA fd00:1122:3344:101::2b ++ name: 
9ae90740-7fdb-4073-ae43-048f2fca3d69.host (records: 1) ++ AAAA fd00:1122:3344:102::2c * name: _nexus._tcp (records: 3 -> 6) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -+ SRV port 12221 3e2f206f-61d2-4dee-91e6-8fbcaf251df6.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -+ SRV port 12221 923d968a-7d98-429b-8026-656d2e72af6a.host.control-plane.oxide.internal -+ SRV port 12221 c11077d3-926e-479e-9f4e-299f6cf3bf29.host.control-plane.oxide.internal -+ name: c11077d3-926e-479e-9f4e-299f6cf3bf29.host (records: 1) -+ AAAA fd00:1122:3344:102::2c ++ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal ++ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal ++ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal ++ name: d516d61b-fd96-46ad-a743-78eec814ee90.host (records: 1) ++ AAAA fd00:1122:3344:101::2b unchanged names: 50 (records: 62) external DNS: @@ -6854,8 +7920,8 @@ external DNS: + A 192.0.2.7 + A 192.0.2.3 + A 192.0.2.6 -+ A 192.0.2.5 + A 192.0.2.4 ++ A 192.0.2.5 unchanged names: 4 (records: 6) @@ -6863,34 +7929,37 @@ external DNS: > # Propagate configs to the sleds which should be running Nexus > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest -set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (67c074ef-c52e-4ff1-851a-598c04dc2c8d) +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (d69e1109-06be-4469-8876-4292dc7885d7) > sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest -set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (67c074ef-c52e-4ff1-851a-598c04dc2c8d) +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (d69e1109-06be-4469-8876-4292dc7885d7) > sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest -set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (67c074ef-c52e-4ff1-851a-598c04dc2c8d) +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (d69e1109-06be-4469-8876-4292dc7885d7) > inventory-generate -generated inventory collection 0f80a0f2-360f-4c64-9c35-3dc067dd2620 from configured sleds +generated inventory collection 7349431d-718a-4353-a1ee-357ce2aeeb28 from configured sleds > # Update the Nexus generation from 1 -> 2, initiating > # quiesce of the old Nexuses > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 9, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 10, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop 
zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e based on parent blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d +generated blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 based on parent blueprint d69e1109-06be-4469-8876-4292dc7885d7 blueprint source: planner with report: -planning report for blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e: +planning report for blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 10 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 3 remaining out-of-date zones * 3 zones waiting to be expunged: * zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus): image out-of-date, but zone's nexus_generation 1 is still active @@ -6901,8 +7970,8 @@ planning report for blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e: > blueprint-diff latest -from: blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d -to: blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e +from: blueprint d69e1109-06be-4469-8876-4292dc7885d7 +to: blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) @@ -6932,19 +8001,22 @@ external DNS: > # Expunge three Nexus zones, one at a time > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 9, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 10, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image 
source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint d69e1109-06be-4469-8876-4292dc7885d7 based on parent blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e +generated blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 based on parent blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 blueprint source: planner with report: -planning report for blueprint d69e1109-06be-4469-8876-4292dc7885d7: +planning report for blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 10 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus) * 3 remaining out-of-date zones @@ -6954,18 +8026,20 @@ planning report for blueprint d69e1109-06be-4469-8876-4292dc7885d7: > blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 
d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 based on parent blueprint d69e1109-06be-4469-8876-4292dc7885d7 +generated blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 based on parent blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 blueprint source: planner with report: -planning report for blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097: +planning report for blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus) * 2 remaining out-of-date zones @@ -6977,17 +8051,18 @@ INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 8, num_eligible: 0, num_ineligible: 1 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 based on parent blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 +generated blueprint 
8e0cc787-e068-4a45-97ed-21029cbe4ddf based on parent blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 blueprint source: planner with report: -planning report for blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6: +planning report for blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus) * 1 remaining out-of-date zone @@ -7008,9 +8083,9 @@ INFO skipping board for MGS-driven update, serial_number: serial0, part_number: INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -generated blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 based on parent blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 +generated blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b based on parent blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf blueprint source: planner with report: -planning report for blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751: +planning report for blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index bce54bd528f..8c86a313423 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -115,8 +115,8 @@ pub enum Error { NoAvailableZpool { sled_id: SledUuid, kind: ZoneKind }, #[error("no Nexus zones exist in parent blueprint")] NoNexusZonesInParentBlueprint, - #[error("no active Nexus zones exist in parent blueprint")] - NoActiveNexusZonesInParentBlueprint, + #[error("no active Nexus zones exist in blueprint currently being built")] + NoActiveNexusZonesInBlueprint, #[error("conflicting values for active Nexus zones in parent blueprint")] ActiveNexusZoneGenerationConflictInParentBlueprint, #[error("no Boundary NTP zones exist in parent blueprint")] diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 885908c7208..305eb0aac24 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -21,6 +21,7 @@ use crate::planner::image_source::NoopConvertHostPhase2Contents; use crate::planner::image_source::NoopConvertZoneStatus; use crate::planner::omicron_zone_placement::PlacementError; use gateway_client::types::SpType; +use iddqd::IdOrdMap; use itertools::Itertools; use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult; use nexus_sled_agent_shared::inventory::OmicronZoneImageSource; @@ -70,6 +71,8 @@ use std::collections::BTreeMap; use 
std::collections::BTreeSet; use std::str::FromStr; use std::sync::Arc; +use swrite::SWrite; +use swrite::swriteln; pub(crate) use self::image_source::NoopConvertGlobalIneligibleReason; pub(crate) use self::image_source::NoopConvertInfo; @@ -1880,12 +1883,10 @@ impl<'a> Planner<'a> { // system is in a corrupt state. The planner will not take any // actions until the error is resolved. // - // 4. (TODO: https://github.com/oxidecomputer/omicron/issues/8726) - // If any sleds' deployment units aren't at known versions after - // noop image source changes have been considered, then we shouldn't - // proceed with adding or updating zones. Again, this is driven - // primarily by the desire to minimize the number of versions of - // system software running at any time. + // 4. If any sleds' deployment units aren't at known versions, then we + // shouldn't proceed with adding zones or updating deployment units. + // Again, this is driven primarily by the desire to minimize the + // number of versions of system software running at any time. // // What does "any sleds" mean in this context? We don't need to care // about decommissioned or expunged sleds, so we consider in-service @@ -1955,7 +1956,71 @@ impl<'a> Planner<'a> { } } - // TODO: implement condition 4 above. + // Condition 4 above. + { + let mut sleds_with_non_artifact = BTreeMap::new(); + for sled_id in self.input.all_sled_ids(SledFilter::InService) { + let mut zones_with_non_artifact = IdOrdMap::new(); + // Are all zone image sources set to Artifact? + for z in self.blueprint.current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) { + match &z.image_source { + BlueprintZoneImageSource::InstallDataset => { + zones_with_non_artifact.insert_overwrite(z); + } + BlueprintZoneImageSource::Artifact { .. } => {} + } + } + + // TODO: (https://github.com/oxidecomputer/omicron/issues/8918) + // We should also check that the boot disk's host phase 2 + // image is a known version. + // + // Currently, the blueprint doesn't cache information + // about which disk is the boot disk. + // + // * Inventory does have this information, but if a sled isn't + // in inventory (due to, say, a transient network error), we + // won't be able to make that determination. + // + // * So we skip this check under the assumption that if all zone + // image sources are at known versions, the host phase 2 image + // is also most likely at a known version. + // + // We really should explicitly check that the host phase 2 image + // is known, though! 
+ + if !zones_with_non_artifact.is_empty() { + sleds_with_non_artifact + .insert(sled_id, zones_with_non_artifact); + } + } + + if !sleds_with_non_artifact.is_empty() { + let mut reason = + "sleds have deployment units with image sources \ + not set to Artifact:\n" + .to_owned(); + for (sled_id, zones_with_non_artifact) in + &sleds_with_non_artifact + { + swriteln!( + reason, + "- sled {sled_id}: {} {}", + zones_with_non_artifact.len(), + if zones_with_non_artifact.len() == 1 { + "zone" + } else { + "zones" + } + ); + } + + reasons.push(reason); + } + } Ok(reasons) } @@ -2236,33 +2301,41 @@ impl<'a> Planner<'a> { fn lookup_current_nexus_image( &self, ) -> Result { - // Look up the active Nexus zone in the blueprint to get its image - if let Some(image) = self - .blueprint - .parent_blueprint() - .all_omicron_zones(BlueprintZoneDisposition::is_in_service) - .find_map(|(_, blueprint_zone)| { - if self.input.active_nexus_zones().contains(&blueprint_zone.id) - { - Some(blueprint_zone.image_source.clone()) - } else { - None + // Look up the active Nexus zone in the blueprint to get its image. + // + // Use the *current* blueprint, not the *parent* blueprint, because zone + // image sources are mutable. In particular, mupdate overrides and noop + // conversions can change the image source. + let mut image_source = None; + for sled_id in self.input.all_sled_ids(SledFilter::InService) { + for zone in self.blueprint.current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) { + if self.input.active_nexus_zones().contains(&zone.id) { + image_source = Some(zone.image_source.clone()); + break; } - }) - { + } + } + + if let Some(image) = image_source { Ok(image) } else { - Err(Error::NoActiveNexusZonesInParentBlueprint) + Err(Error::NoActiveNexusZonesInBlueprint) } } fn lookup_current_nexus_generation(&self) -> Result { - // Look up the active Nexus zone in the blueprint to get its generation + // Look up the active Nexus zone in the blueprint to get its generation. + // + // The Nexus generation is immutable, so it's fine (and easier in this + // case) to look at the parent blueprint. self.blueprint .parent_blueprint() .find_generation_for_nexus(self.input.active_nexus_zones()) - .map_err(|_| Error::NoActiveNexusZonesInParentBlueprint)? - .ok_or(Error::NoActiveNexusZonesInParentBlueprint) + .map_err(|_| Error::NoActiveNexusZonesInBlueprint)? + .ok_or(Error::NoActiveNexusZonesInBlueprint) } // Returns whether the out-of-date Nexus zone is ready to be updated. @@ -2649,6 +2722,18 @@ pub(crate) mod test { .build(); verify_blueprint(&blueprint1); + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let input = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder") + .build(); + println!("{}", blueprint1.display()); // Now run the planner. It should do nothing because our initial @@ -2657,7 +2742,7 @@ pub(crate) mod test { let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, - &example.input, + &input, "no-op?", &example.collection, PlannerRng::from_seed((TEST_NAME, "bp2")), @@ -2836,7 +2921,7 @@ pub(crate) mod test { // Use our example system with one sled and one Nexus instance as a // starting point. 
- let (example, blueprint1) = + let (mut example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME) .nsleds(1) .nexus_count(1) @@ -2848,7 +2933,18 @@ pub(crate) mod test { .next() .map(|sa| sa.sled_id) .unwrap(); - let input = example.input; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let input = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder") + .build(); let collection = example.collection; // This blueprint should only have 1 Nexus instance on the one sled we @@ -2938,7 +3034,15 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + let collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); // This blueprint should only have 3 Nexus zones: one on each sled. assert_eq!(blueprint1.sleds.len(), 3); @@ -2954,7 +3058,10 @@ pub(crate) mod test { } // Now run the planner with a high number of target Nexus zones. - let mut builder = input.into_builder(); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); builder.policy_mut().target_nexus_zone_count = 14; let input = builder.build(); let blueprint2 = Planner::new_based_on( @@ -3024,8 +3131,20 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (collection, input, mut blueprint1) = - example(&logctx.log, TEST_NAME); + let (mut example, mut blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + let collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); // This blueprint should have exactly 3 internal DNS zones: one on each // sled. @@ -3043,27 +3162,33 @@ pub(crate) mod test { // Try to run the planner with a high number of internal DNS zones; // it will fail because the target is > MAX_DNS_REDUNDANCY. 
- let mut builder = input.clone().into_builder(); - builder.policy_mut().target_internal_dns_zone_count = 14; - match Planner::new_based_on( - logctx.log.clone(), - &blueprint1, - &builder.build(), - "test_blueprint2", - &collection, - PlannerRng::from_entropy(), - ) - .expect("created planner") - .plan() { - Ok(_) => panic!("unexpected success"), - Err(err) => { - let err = InlineErrorChain::new(&err).to_string(); - assert!( - err.contains("can only have ") - && err.contains(" internal DNS servers"), - "unexpected error: {err}" - ); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); + builder.policy_mut().target_internal_dns_zone_count = 14; + + match Planner::new_based_on( + logctx.log.clone(), + &blueprint1, + &builder.build(), + "test_blueprint2", + &collection, + PlannerRng::from_entropy(), + ) + .expect("created planner") + .plan() + { + Ok(_) => panic!("unexpected success"), + Err(err) => { + let err = InlineErrorChain::new(&err).to_string(); + assert!( + err.contains("can only have ") + && err.contains(" internal DNS servers"), + "unexpected error: {err}" + ); + } } } @@ -3086,6 +3211,12 @@ pub(crate) mod test { }); } + let builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); + let input = builder.build(); + let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, @@ -3154,12 +3285,23 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + let collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); // Expunge the first sled we see, which will result in a Nexus external // IP no longer being associated with a running zone, and a new Nexus // zone being added to one of the two remaining sleds. - let mut builder = input.into_builder(); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); let (sled_id, _) = builder.sleds_mut().iter_mut().next().expect("no sleds"); let sled_id = *sled_id; @@ -3261,7 +3403,21 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + let collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. 
+ let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let input = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder") + .build(); // We should not be able to add any external DNS zones yet, // because we haven't give it any addresses (which currently @@ -3445,12 +3601,19 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Create an example system with a single sled - let (example, blueprint1) = + let (mut example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build(); let collection = example.collection; - let input = example.input; + // Set this chicken switch so that zones are added even though zones are + // currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); - let mut builder = input.into_builder(); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); // Avoid churning on the quantity of Nexus and internal DNS zones - // we're okay staying at one each. @@ -3546,12 +3709,20 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Create an example system with a single sled - let (example, mut blueprint1) = + let (mut example, mut blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build(); let collection = example.collection; - let input = example.input; - let mut builder = input.into_builder(); + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); // Avoid churning on the quantity of Nexus and internal DNS zones - // we're okay staying at one each. @@ -3624,11 +3795,17 @@ pub(crate) mod test { static TEST_NAME: &str = "planner_disk_add_expunge_decommission"; let logctx = test_setup_log(TEST_NAME); - // Create an example system with a single sled - let (example, blueprint1) = - ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build(); + // Create an example system with two sleds. We're going to expunge one + // of these sleds. + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(2).build(); let mut collection = example.collection; - let input = example.input; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); // The initial blueprint configuration has generation 2 let (sled_id, sled_config) = @@ -3643,7 +3820,10 @@ pub(crate) mod test { ); } - let mut builder = input.into_builder(); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); // Let's expunge a disk. Its disposition should change to `Expunged` // but its state should remain active. 
@@ -3987,15 +4167,23 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Create an example system with a single sled - let (example, blueprint1) = + let (mut example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME) .nsleds(1) .nexus_count(2) .build(); let collection = example.collection; - let input = example.input; - let mut builder = input.into_builder(); + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); // Aside: Avoid churning on the quantity of Nexus zones - we're okay // staying at two. @@ -4130,10 +4318,15 @@ pub(crate) mod test { // and decommissioned sleds. (When we add more kinds of // non-provisionable states in the future, we'll have to add more // sleds.) - let (example, mut blueprint1) = + let (mut example, mut blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(5).build(); let collection = example.collection; - let input = example.input; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); // This blueprint should only have 5 Nexus zones: one on each sled. assert_eq!(blueprint1.sleds.len(), 5); @@ -4150,7 +4343,10 @@ pub(crate) mod test { // Arbitrarily choose some of the sleds and mark them non-provisionable // in various ways. - let mut builder = input.into_builder(); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); let mut sleds_iter = builder.sleds_mut().iter_mut(); let nonprovisionable_sled_id = { @@ -4457,13 +4653,25 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let collection = example.collection; // Expunge one of the sleds. // // We expunge a sled via planning input using the builder so that disks // are properly taken into account. - let mut builder = input.into_builder(); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); let expunged_sled_id = { let mut iter = builder.sleds_mut().iter_mut(); let (sled_id, _) = iter.next().expect("at least one sled"); @@ -4738,7 +4946,15 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + let collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. 
+ let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); // We should start with CRUCIBLE_PANTRY_REDUNDANCY pantries spread out // to at most 1 per sled. Find one of the sleds running one. @@ -4760,7 +4976,10 @@ pub(crate) mod test { // (non-expunged) sled. let expunged_sled_id = pantry_sleds[0]; - let mut input_builder = input.into_builder(); + let mut input_builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); input_builder .sleds_mut() .get_mut(&expunged_sled_id) @@ -4812,7 +5031,15 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + let collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); // We should start with one ClickHouse zone. Find out which sled it's on. let clickhouse_sleds = blueprint1 @@ -4830,7 +5057,10 @@ pub(crate) mod test { // Expunge the sled hosting ClickHouse and re-plan. The planner should // immediately replace the zone with one on another (non-expunged) sled. - let mut input_builder = input.into_builder(); + let mut input_builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); input_builder .sleds_mut() .get_mut(&clickhouse_sled) @@ -4882,9 +5112,17 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. - let (mut collection, input, blueprint1) = example(&log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); + let mut collection = example.collection; verify_blueprint(&blueprint1); + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + // We shouldn't have a clickhouse cluster config, as we don't have a // clickhouse policy set yet assert!(blueprint1.clickhouse_cluster_config.is_none()); @@ -4892,7 +5130,10 @@ pub(crate) mod test { let target_servers = 2; // Enable clickhouse clusters via policy - let mut input_builder = input.into_builder(); + let mut input_builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); input_builder.policy_mut().clickhouse_policy = Some(clickhouse_policy(ClickhouseMode::Both { target_servers, @@ -5229,13 +5470,25 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. - let (mut collection, input, blueprint1) = example(&log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&log, TEST_NAME).build(); + let mut collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. 
+ let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let mut input_builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); let target_keepers = 3; let target_servers = 2; // Enable clickhouse clusters via policy - let mut input_builder = input.into_builder(); input_builder.policy_mut().clickhouse_policy = Some(clickhouse_policy(ClickhouseMode::Both { target_servers, @@ -5448,13 +5701,25 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. - let (collection, input, blueprint1) = example(&log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&log, TEST_NAME).build(); + let collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); + + let mut input_builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); let target_keepers = 3; let target_servers = 2; // Enable clickhouse clusters via policy - let mut input_builder = input.into_builder(); input_builder.policy_mut().clickhouse_policy = Some(clickhouse_policy(ClickhouseMode::Both { target_servers, @@ -5832,7 +6097,15 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. - let (mut collection, input, blueprint1) = example(&log, TEST_NAME); + let (mut example, blueprint1) = + ExampleSystemBuilder::new(&log, TEST_NAME).build(); + let mut collection = example.collection; + + // Set this chicken switch so that zones are added even though image + // sources are currently InstallDataset. + let mut config = example.system.get_planner_config(); + config.add_zones_with_mupdate_override = true; + example.system.set_planner_config(config); // Find a internal DNS zone we'll use for our test. let (sled_id, internal_dns_config) = blueprint1 @@ -5850,7 +6123,10 @@ pub(crate) mod test { // Expunge the disk used by the internal DNS zone. let input = { let internal_dns_zpool = &internal_dns_config.filesystem_pool; - let mut builder = input.into_builder(); + let mut builder = example + .system + .to_planning_input_builder() + .expect("created PlanningInputBuilder"); builder .sleds_mut() .get_mut(&sled_id) @@ -6090,11 +6366,12 @@ pub(crate) mod test { &logctx.log, rng.next_system_rng(), ) + .with_target_release_0_0_1() + .expect("set target release to 0.0.1") .build(); verify_blueprint(&blueprint1); - // We should start with no specified TUF repo and nothing to do. - assert!(example.input.tuf_repo().description().tuf_repo().is_none()); + // We should start with nothing to do. assert_planning_makes_no_changes( &logctx.log, &blueprint1, @@ -6103,13 +6380,17 @@ pub(crate) mod test { TEST_NAME, ); - // All zones should be sourced from the install dataset by default. + // All zones should be sourced from the initial 0.0.1 target release by + // default. 
assert!( blueprint1 .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .all(|(_, z)| matches!( - z.image_source, - BlueprintZoneImageSource::InstallDataset + &z.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } )) ); @@ -6146,8 +6427,11 @@ pub(crate) mod test { let is_old_nexus = |zone: &BlueprintZoneConfig| -> bool { zone.zone_type.is_nexus() && matches!( - zone.image_source, - BlueprintZoneImageSource::InstallDataset + &zone.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } ) }; let is_up_to_date_nexus = |zone: &BlueprintZoneConfig| -> bool { @@ -6156,8 +6440,11 @@ pub(crate) mod test { let is_old_pantry = |zone: &BlueprintZoneConfig| -> bool { zone.zone_type.is_crucible_pantry() && matches!( - zone.image_source, - BlueprintZoneImageSource::InstallDataset + &zone.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } ) }; let is_up_to_date_pantry = |zone: &BlueprintZoneConfig| -> bool { @@ -6203,6 +6490,7 @@ pub(crate) mod test { { let blueprint_name = format!("expunging_crucible_pantry_{i}"); i += 1; + update_collection_from_blueprint(&mut example, &parent); let blueprint = Planner::new_based_on( log.clone(), @@ -6232,10 +6520,15 @@ pub(crate) mod test { // If the zone was previously in-service, it gets // expunged. if modified_zone.disposition.before.is_in_service() { - assert_eq!( - *modified_zone.image_source.before, - BlueprintZoneImageSource::InstallDataset - ); + assert!(matches!( + modified_zone.image_source.before, + BlueprintZoneImageSource::Artifact { + version, + hash: _, + } if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } + )); assert!( modified_zone.disposition.after.is_expunged(), ); @@ -6411,10 +6704,15 @@ pub(crate) mod test { *modified_zone.zone_type.before, BlueprintZoneType::Nexus(_) )); - assert_eq!( - *modified_zone.image_source.before, - BlueprintZoneImageSource::InstallDataset - ); + assert!(matches!( + modified_zone.image_source.before, + BlueprintZoneImageSource::Artifact { + version, + hash: _, + } if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } + )); // If the zone was previously in-service, it gets // expunged. @@ -6548,7 +6846,7 @@ pub(crate) mod test { // This test "starts" here -- we specify a new TUF repo with an updated // CockroachDB image. We create a new TUF repo where version of - // CockroachDB has been updated out of the install dataset. + // CockroachDB has been updated out of the 0.0.1 repo. // // The planner should avoid doing this update until it has confirmation // from inventory that the cluster is healthy. @@ -6585,7 +6883,7 @@ pub(crate) mod test { // Manually update all zones except Cockroach // // We just specified a new TUF repo, everything is going to shift from - // the install dataset to this new repo. + // the initial 0.0.1 repo to this new repo. for mut zone in blueprint .sleds .values_mut() @@ -6695,8 +6993,8 @@ pub(crate) mod test { // Once we have zero underreplicated ranges, we can start to update // Cockroach zones. // - // We'll update one zone at a time, from the install dataset to the - // new TUF repo artifact. 
+ // We'll update one zone at a time, from the initial 0.0.1 artifact to + // the new TUF repo artifact. for i in 1..=COCKROACHDB_REDUNDANCY { // Keep setting this value in a loop; // "update_collection_from_blueprint" resets it. @@ -6746,7 +7044,7 @@ pub(crate) mod test { TEST_NAME, ); - // Validate that we do not flip back to the install dataset after + // Validate that we do not flip back to the 0.0.1 artifact after // performing the update. example.collection.cockroach_status = create_valid_looking_status(); example @@ -6780,6 +7078,8 @@ pub(crate) mod test { &logctx.log, rng.next_system_rng(), ) + .with_target_release_0_0_1() + .expect("set target release to 0.0.1") .build(); verify_blueprint(&blueprint); @@ -6962,19 +7262,22 @@ pub(crate) mod test { TEST_NAME, ); - // All zones should be sourced from the install dataset by default. + // All zones should be sourced from the 0.0.1 repo by default. assert!( blueprint .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .all(|(_, z)| matches!( - z.image_source, - BlueprintZoneImageSource::InstallDataset + &z.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } )) ); // This test "starts" here -- we specify a new TUF repo with an updated // Boundary NTP image. We create a new TUF repo where version of - // Boundary NTP has been updated out of the install dataset. + // Boundary NTP has been updated out of the 0.0.1 repo. // // The planner should avoid doing this update until it has confirmation // from inventory that the cluster is healthy. @@ -7011,7 +7314,7 @@ pub(crate) mod test { // Manually update all zones except boundary NTP // // We just specified a new TUF repo, everything is going to shift from - // the install dataset to this new repo. + // the 0.0.1 repo to this new repo. for mut zone in blueprint .sleds .values_mut() @@ -7031,8 +7334,11 @@ pub(crate) mod test { let is_old_boundary_ntp = |zone: &BlueprintZoneConfig| -> bool { zone.zone_type.is_boundary_ntp() && matches!( - zone.image_source, - BlueprintZoneImageSource::InstallDataset + &zone.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } ) }; let old_boundary_ntp_count = |blueprint: &Blueprint| -> usize { @@ -7145,8 +7451,8 @@ pub(crate) mod test { // Once all nodes are timesync'd, we can start to update boundary NTP // zones. // - // We'll update one zone at a time, from the install dataset to the - // new TUF repo artifact. + // We'll update one zone at a time, from the 0.0.1 artifact to the new + // TUF repo artifact. 
set_valid_looking_timesync(&mut example.collection); // @@ -7168,7 +7474,8 @@ pub(crate) mod test { { let summary = new_blueprint.diff_since_blueprint(&blueprint); eprintln!( - "diff between blueprints (should be expunging boundary NTP using install dataset):\n{}", + "diff between blueprints (should be expunging \ + boundary NTP using 0.0.1 artifact):\n{}", summary.display() ); eprintln!("{}", new_blueprint.source); @@ -7281,7 +7588,8 @@ pub(crate) mod test { // + Start expunging an internal NTP // // Cleanup: - // * Finish expunging the boundary NTP on the install dataset + // * Finish expunging the boundary NTP running off of the 0.0.1 + // artifact // let new_blueprint = Planner::new_based_on( @@ -7361,7 +7669,7 @@ pub(crate) mod test { TEST_NAME, ); - // Validate that we do not flip back to the install dataset after + // Validate that we do not flip back to the 0.0.1 artifact after // performing the update, even if we lose timesync data. example.collection.ntp_timesync = IdOrdMap::new(); assert_planning_makes_no_changes( @@ -7387,22 +7695,27 @@ pub(crate) mod test { &logctx.log, rng.next_system_rng(), ) + .with_target_release_0_0_1() + .expect("set target release to 0.0.1") .build(); verify_blueprint(&blueprint); - // All zones should be sourced from the install dataset by default. + // All zones should be sourced from the initial TUF repo by default. assert!( blueprint .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .all(|(_, z)| matches!( - z.image_source, - BlueprintZoneImageSource::InstallDataset + &z.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } )) ); // This test "starts" here -- we specify a new TUF repo with an updated // Internal DNS image. We create a new TUF repo where version of - // Internal DNS has been updated out of the install dataset. + // Internal DNS has been updated to "1.0.0-freeform". // // The planner should avoid doing this update until it has confirmation // from inventory that the Internal DNS servers are ready. @@ -7440,7 +7753,7 @@ pub(crate) mod test { // Manually update all zones except Internal DNS // // We just specified a new TUF repo, everything is going to shift from - // the install dataset to this new repo. + // the 0.0.1 repo to this new repo. for mut zone in blueprint .sleds .values_mut() @@ -7460,8 +7773,11 @@ pub(crate) mod test { let is_old_internal_dns = |zone: &BlueprintZoneConfig| -> bool { zone.zone_type.is_internal_dns() && matches!( - zone.image_source, - BlueprintZoneImageSource::InstallDataset + &zone.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } ) }; let is_up_to_date_internal_dns = |zone: &BlueprintZoneConfig| -> bool { @@ -7544,8 +7860,8 @@ pub(crate) mod test { // Once we have valid DNS statuses, we can start to update Internal DNS // zones. // - // We'll update one zone at a time, from the install dataset to the - // new TUF repo artifact. + // We'll update one zone at a time, from the 0.0.1 artifact to the new + // TUF repo artifact. 
for i in 1..=INTERNAL_DNS_REDUNDANCY { example.collection.internal_dns_generation_status = create_valid_looking_status(&blueprint); @@ -7630,7 +7946,7 @@ pub(crate) mod test { TEST_NAME, ); - // Validate that we do not flip back to the install dataset after + // Validate that we do not flip back to the 0.0.1 artifact after // performing the update. example.collection.internal_dns_generation_status = IdOrdMap::new(); assert_planning_makes_no_changes( @@ -7657,16 +7973,21 @@ pub(crate) mod test { &logctx.log, rng.next_system_rng(), ) + .with_target_release_0_0_1() + .expect("set target release to 0.0.1") .build(); verify_blueprint(&blueprint1); - // All zones should be sourced from the install dataset by default. + // All zones should be sourced from the 0.0.1 repo by default. assert!( blueprint1 .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .all(|(_, z)| matches!( - z.image_source, - BlueprintZoneImageSource::InstallDataset + &z.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } )) ); @@ -7910,6 +8231,30 @@ pub(crate) mod test { ); } + // Asserts that a blueprint, if generated, is not trying to bump the + // Nexus generation number. + #[track_caller] + fn assert_child_bp_has_no_nexus_generation_bump( + &self, + child_blueprint: &Blueprint, + ) { + verify_blueprint(&child_blueprint); + let summary = child_blueprint.diff_since_blueprint(&self.blueprint); + + let BlueprintSource::Planner(report) = &child_blueprint.source + else { + panic!("Child blueprint has no associated report"); + }; + + assert!( + report.nexus_generation_bump.is_empty(), + "Blueprint Summary: {}\n\ + Nexus generation bump is not empty: {}", + summary.display(), + report, + ); + } + // Asserts that a new blueprint, if generated, will have no report. // // This function explicitly ignores the "noop_image_source" report. @@ -7935,7 +8280,7 @@ pub(crate) mod test { && report.zone_updates.is_empty() && report.nexus_generation_bump.is_empty() && report.cockroachdb_settings.is_empty(), - "Blueprint Summary: {}\n + "Blueprint Summary: {}\n\ Planning report is not empty: {}", summary.display(), report, @@ -7978,7 +8323,7 @@ pub(crate) mod test { // repo. let new_bp = bp_generator.plan_new_blueprint("no-op"); bp_generator.assert_child_bp_makes_no_changes(&new_bp); - bp_generator.assert_child_bp_has_no_report(&new_bp); + bp_generator.assert_child_bp_has_no_nexus_generation_bump(&new_bp); // Set up a TUF repo with new artifacts let artifact_version_1 = @@ -8136,7 +8481,7 @@ pub(crate) mod test { // repo. 
let new_bp = bp_generator.plan_new_blueprint("no-op"); bp_generator.assert_child_bp_makes_no_changes(&new_bp); - bp_generator.assert_child_bp_has_no_report(&new_bp); + bp_generator.assert_child_bp_has_no_nexus_generation_bump(&new_bp); // Initially, all zones should be sourced from the install dataset assert!( diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index 3e0e162f4c0..a5a5857bd79 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -328,6 +328,15 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd blueprint source: planner with report: planning report for blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled d67ce8f0-a691-4010-b414-420d82e80527: 14 zones + - sled fefcf4cf-f7e7-46b3-b629-058526ce440e: 14 zones + +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * discretionary zones placed: * crucible_pantry zone on sled d67ce8f0-a691-4010-b414-420d82e80527 from source install dataset * nexus zone on sled d67ce8f0-a691-4010-b414-420d82e80527 from source install dataset diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 9d7021ad581..bf96b85473b 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -516,6 +516,16 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b blueprint source: planner with report: planning report for blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - sleds have deployment units with image sources not set to Artifact: + - sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: 15 zones + - sled 75bc286f-2b4b-482c-9431-59272af529da: 12 zones + - sled affab35f-600a-4109-8ea0-34a067a4e0bc: 12 zones + +* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option * discretionary zones placed: * nexus zone on sled 75bc286f-2b4b-482c-9431-59272af529da from source install dataset * nexus zone on sled 75bc286f-2b4b-482c-9431-59272af529da from source install dataset diff --git a/nexus/src/app/background/tasks/blueprint_planner.rs b/nexus/src/app/background/tasks/blueprint_planner.rs index b4a55da6e30..8434779a96f 100644 --- a/nexus/src/app/background/tasks/blueprint_planner.rs +++ b/nexus/src/app/background/tasks/blueprint_planner.rs @@ -359,7 +359,14 @@ mod test { version: 1, config: ReconfiguratorConfig { planner_enabled: true, - planner_config: PlannerConfig::default(), + planner_config: PlannerConfig { + // Set this config to true because we'd like to test + // adding zones even if no target release is set. In the + // future, we'll allow adding zones if no target release + // has ever been set, in which case we can go back to + // setting this field to false. 
+ add_zones_with_mupdate_override: true, + }, }, time_modified: now_db_precision(), }), diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 343f91b8169..67ea81dcc03 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -1066,6 +1066,16 @@ impl IdMappable for BlueprintZoneConfig { } } +impl IdOrdItem for BlueprintZoneConfig { + type Key<'a> = OmicronZoneUuid; + + fn key(&self) -> Self::Key<'_> { + self.id + } + + id_upcast!(); +} + impl BlueprintZoneConfig { /// Returns the underlay IP address associated with this zone. /// From 0f4d4c844a6e24852f73af48166f5f18b8239fbf Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Tue, 23 Sep 2025 14:01:59 -0400 Subject: [PATCH 05/18] PlanningReport: Remove `blueprint_id` (#9052) Planning reports are contained in `Blueprint`s, which have an ID. Prior to this PR we duplicated the containing blueprint's ID. This bit @davepacheco and me in a couple different (admittedly unusual) testing contexts where we were duplicating blueprints and making changes, not realizing we produced a new blueprint with a different ID but carrying a report that still pointed to the original blueprint's ID. The only thing we lose here is that the display output of the planning report can no longer say what blueprint it's for, but I think that's fine - all the places where we want to display a report, we already know the blueprint ID. --- .../output/cmds-add-sled-no-disks-stdout | 4 +- .../tests/output/cmds-example-stdout | 4 +- ...ds-expunge-newly-added-external-dns-stdout | 4 +- ...ds-expunge-newly-added-internal-dns-stdout | 2 +- .../output/cmds-mupdate-update-flow-stdout | 26 ++-- .../output/cmds-noop-image-source-stdout | 4 +- .../tests/output/cmds-target-release-stdout | 128 +++++++++--------- nexus/db-model/src/deployment.rs | 12 +- .../db-queries/src/db/datastore/deployment.rs | 5 +- nexus/reconfigurator/planning/src/planner.rs | 2 - .../planner_decommissions_sleds_bp2.txt | 2 +- .../output/planner_nonprovisionable_bp2.txt | 2 +- .../app/background/tasks/blueprint_planner.rs | 7 +- nexus/types/src/deployment/planning_report.rs | 16 +-- openapi/nexus-internal.json | 9 -- 15 files changed, 104 insertions(+), 123 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout index 167705a3cdf..ece6303ea98 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout @@ -47,7 +47,7 @@ planner config updated: INFO skipping noop image source check for all sleds, reason: no target release is currently set generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 blueprint source: planner with report: -planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: +planning report: planner config: add zones with mupdate override: true @@ -296,7 +296,7 @@ parent: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 PENDING MGS-MANAGED UPDATES: 0 blueprint source: planner with report: -planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: +planning report: planner config: add zones with mupdate override: true diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index aa599ee1139..b781a454cb2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ 
b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -601,7 +601,7 @@ T ENA ID PARENT INFO skipping noop image source check for all sleds, reason: no target release is currently set generated blueprint 86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a blueprint source: planner with report: -planning report for blueprint 86db3308-f817-4626-8838-4085949a6a41: +planning report: * zone adds waiting on blockers * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: @@ -1846,7 +1846,7 @@ INTERNAL DNS STATUS INFO skipping noop image source check for all sleds, reason: no target release is currently set generated blueprint 86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a blueprint source: planner with report: -planning report for blueprint 86db3308-f817-4626-8838-4085949a6a41: +planning report: * zone adds waiting on blockers * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index 4f5c22d684f..f1334083bf1 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -851,7 +851,7 @@ planner config updated: INFO skipping noop image source check for all sleds, reason: no target release is currently set generated blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2 based on parent blueprint 366b0b68-d80e-4bc1-abd3-dc69837847e0 blueprint source: planner with report: -planning report for blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2: +planning report: planner config: add zones with mupdate override: true @@ -1360,7 +1360,7 @@ parent: 366b0b68-d80e-4bc1-abd3-dc69837847e0 PENDING MGS-MANAGED UPDATES: 0 blueprint source: planner with report: -planning report for blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2: +planning report: planner config: add zones with mupdate override: true diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index bc8ea7f2ba8..f5b7e9bcd70 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -658,7 +658,7 @@ planner config updated: INFO skipping noop image source check for all sleds, reason: no target release is currently set generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint source: planner with report: -planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: +planning report: planner config: add zones with mupdate override: true diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 1a89e3d3aec..47bd97b5e97 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -504,7 +504,7 @@ INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0 INFO 
skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (203fa72c-85c1-466a-8ed3-338ee029530d) generated blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 based on parent blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 blueprint source: planner with report: -planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: +planning report: * zone adds waiting on blockers * zone adds and updates are blocked: - current target release generation (2) is lower than minimum required by blueprint (3) @@ -786,7 +786,7 @@ INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0 INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (203fa72c-85c1-466a-8ed3-338ee029530d) generated blueprint 626487fa-7139-45ec-8416-902271fc730b based on parent blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 blueprint source: planner with report: -planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: +planning report: * zone adds waiting on blockers * zone adds and updates are blocked: - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 @@ -918,7 +918,7 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (1c0ce176-6dc8-4a90-adea-d4a8000751da) generated blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b based on parent blueprint 626487fa-7139-45ec-8416-902271fc730b blueprint source: planner with report: -planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: +planning report: * noop converting 6/6 install-dataset zones to artifact store on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * zone adds waiting on blockers * zone adds and updates are blocked: @@ -1108,7 +1108,7 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b generated blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 based on parent blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b blueprint source: planner with report: -planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * zone adds waiting on blockers * zone adds and updates are blocked: @@ -1290,7 +1290,7 @@ parent: c1a0d242-9160-40f4-96ae-61f8f40a0b1b PENDING MGS-MANAGED UPDATES: 0 blueprint source: planner with report: -planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * zone adds waiting on blockers * zone adds and updates are blocked: @@ -1437,7 +1437,7 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b 
generated blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 based on parent blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 blueprint source: planner with report: -planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * noop converting 6/6 install-dataset zones to artifact store on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 * zone adds waiting on blockers @@ -1618,7 +1618,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 PENDING MGS-MANAGED UPDATES: 0 blueprint source: planner with report: -planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * noop converting 6/6 install-dataset zones to artifact store on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 * zone adds waiting on blockers @@ -1737,7 +1737,7 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 blueprint source: planner with report: -planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * zone adds waiting on blockers @@ -1806,7 +1806,7 @@ INFO configuring MGS-driven update, artifact_version: 2.0.0, artifact_hash: 8f89 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 blueprint source: planner with report: -planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * noop converting 7/7 install-dataset zones to artifact store on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c @@ -1991,7 +1991,7 @@ parent: 8f2d1f39-7c88-4701-aa43-56bf281b28c1 blueprint source: planner with report: -planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * noop converting 7/7 install-dataset zones to artifact store on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c @@ -2178,7 +2178,7 @@ INFO ran out of boards for MGS-driven update INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: 353b3b65-20f7-48c3-88f7-495bd5d31545 (service), zone_kind: Clickhouse, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: 
ArtifactHash("bb2d1ff02d11f72bc9049ae57f27536207519a1859d29f8d7a90ab3b44d56b08") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 466a9f29-62bf-4e63-924a-b9efdb86afec (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 62620961-fc4a-481e-968b-f5acbac0dc63 (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 99e2f30b-3174-40bf-a78a-90da8abba8ca (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: bd354eef-d8a6-4165-9124-283fb5e46d77 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: 
ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 75b220ba-a0f4-4872-8202-dc7c87f062d0 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ea5b4030-b52f-44b2-8d70-45f15f987d01 (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f55647d4-5500-4ad3-893a-df45bd50d622 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f") }, inv_image_source: InstallDataset } }] generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 blueprint source: planner with report: -planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 7 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from 
artifacts @@ -2418,7 +2418,7 @@ INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-495 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 blueprint source: planner with report: -planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 7 zones are already from artifacts * skipping noop zone image source check on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b: all 0 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts @@ -2550,7 +2550,7 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO altered physical disks, sled_id: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b, sled_edits: SledEditCounts { disks: EditCounts { added: 10, updated: 0, expunged: 0, removed: 0 }, datasets: EditCounts { added: 20, updated: 0, expunged: 0, removed: 0 }, zones: EditCounts { added: 0, updated: 0, expunged: 0, removed: 0 } } generated blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f based on parent blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c blueprint source: planner with report: -planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: +planning report: planner config: add zones with mupdate override: true diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index f5814c9fef2..1de42a66bb2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -182,7 +182,7 @@ INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-e INFO skipped noop image source check on sled, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, reason: sled not found in inventory generated blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 based on parent blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 blueprint source: planner with report: -planning report for blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4: +planning report: * noop converting 6/6 install-dataset zones to artifact store on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * noop converting 5/6 install-dataset zones to artifact store on sled aff6c093-197d-42c5-ad80-9f10ba051a34 * zone adds waiting on blockers @@ -430,7 +430,7 @@ INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noo INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint source: planner with report: -planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: +planning report: * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 6 zones are already from artifacts * noop converting 2/2 install-dataset zones to artifact store on sled 
e96e226f-4ed9-4c01-91b9-69a9cd076c9e * zone adds waiting on blockers diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 313caa94393..08eaa6fd97a 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -142,7 +142,7 @@ INFO configuring MGS-driven update, artifact_version: 0.0.1, artifact_hash: 244d INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 blueprint source: planner with report: -planning report for blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1: +planning report: * noop converting 9/9 install-dataset zones to artifact store on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * noop converting 8/8 install-dataset zones to artifact store on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * noop converting 8/8 install-dataset zones to artifact store on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 @@ -1114,7 +1114,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint source: planner with report: -planning report for blueprint af934083-59b5-4bf6-8966-6fb5292c29e1: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1178,7 +1178,7 @@ INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint df06bb57-ad42-4431-9206-abff322896c7 based on parent blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 blueprint source: planner with report: -planning report for blueprint df06bb57-ad42-4431-9206-abff322896c7: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1244,7 +1244,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba based on parent blueprint df06bb57-ad42-4431-9206-abff322896c7 blueprint source: planner with report: -planning report for blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1319,7 +1319,7 @@ INFO configuring 
MGS-driven update, artifact_version: 1.0.0, artifact_hash: 6846 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 9034c710-3e57-45f3-99e5-4316145e87ac based on parent blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba blueprint source: planner with report: -planning report for blueprint 9034c710-3e57-45f3-99e5-4316145e87ac: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1395,7 +1395,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 based on parent blueprint 9034c710-3e57-45f3-99e5-4316145e87ac blueprint source: planner with report: -planning report for blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1520,7 +1520,7 @@ INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 based on parent blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 blueprint source: planner with report: -planning report for blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1585,7 +1585,7 @@ INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 626487fa-7139-45ec-8416-902271fc730b based on parent blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 blueprint source: planner with report: -planning report for blueprint 626487fa-7139-45ec-8416-902271fc730b: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1649,7 +1649,7 @@ INFO keeping apparently-impossible MGS-driven update (waiting for recent update INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b based on parent blueprint 626487fa-7139-45ec-8416-902271fc730b blueprint source: planner with report: -planning report for blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b: +planning report: * skipping noop 
zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1715,7 +1715,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 based on parent blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b blueprint source: planner with report: -planning report for blueprint afb09faf-a586-4483-9289-04d4f1d8ba23: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1792,7 +1792,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 based on parent blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 blueprint source: planner with report: -planning report for blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1864,7 +1864,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 based on parent blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 blueprint source: planner with report: -planning report for blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -1940,7 +1940,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 blueprint source: planner with report: -planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2012,7 +2012,7 @@ INFO 
configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 6846 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 based on parent blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 blueprint source: planner with report: -planning report for blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2087,7 +2087,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 6846 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c based on parent blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 blueprint source: planner with report: -planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2160,7 +2160,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f based on parent blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c blueprint source: planner with report: -planning report for blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2300,7 +2300,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 based on parent blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f blueprint source: planner with report: -planning report for blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2378,7 +2378,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: 5b0f INFO ran out of boards for MGS-driven update generated blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 based on parent blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 blueprint source: planner with report: -planning report for blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746: +planning report: * skipping noop zone image 
source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2449,7 +2449,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint b82656b0-a9be-433d-83d0-e2bdf371777a based on parent blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 blueprint source: planner with report: -planning report for blueprint b82656b0-a9be-433d-83d0-e2bdf371777a: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2524,7 +2524,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 based on parent blueprint b82656b0-a9be-433d-83d0-e2bdf371777a blueprint source: planner with report: -planning report for blueprint 31c84831-be52-4630-bc3f-128d72cd8f22: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2601,7 +2601,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 based on parent blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 blueprint source: planner with report: -planning report for blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2675,7 +2675,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: d11e INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 based on parent blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 blueprint source: planner with report: -planning report for blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2747,7 +2747,7 @@ INFO configuring 
MGS-driven update, artifact_version: 1.0.0, artifact_hash: 6846 INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint e54a0836-53e1-4948-a3af-0b77165289b5 based on parent blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 blueprint source: planner with report: -planning report for blueprint e54a0836-53e1-4948-a3af-0b77165289b5: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2818,7 +2818,7 @@ INFO configuring MGS-driven update, artifact_version: 1.0.0, artifact_hash: b99d INFO reached maximum number of pending MGS-driven updates, max: 1 generated blueprint 459a45a5-616e-421f-873b-2fb08c36205c based on parent blueprint e54a0836-53e1-4948-a3af-0b77165289b5 blueprint source: planner with report: -planning report for blueprint 459a45a5-616e-421f-873b-2fb08c36205c: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -2955,7 +2955,7 @@ INFO skipping board for MGS-driven update, serial_number: serial1, part_number: INFO ran out of boards for MGS-driven update generated blueprint b2295597-5788-482e-acf9-1731ec63fbd2 based on parent blueprint 459a45a5-616e-421f-873b-2fb08c36205c blueprint source: planner with report: -planning report for blueprint b2295597-5788-482e-acf9-1731ec63fbd2: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -3089,7 +3089,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 6fad8fd4-e825-433f-b76d-495484e068ce based on parent blueprint b2295597-5788-482e-acf9-1731ec63fbd2 blueprint source: planner with report: -planning report for blueprint 6fad8fd4-e825-433f-b76d-495484e068ce: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -3229,7 +3229,7 @@ INFO ran out of boards for MGS-driven update INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: ba87399e-e9b7-4ee4-8cb7-0032822630e9 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] generated blueprint 
24b6e243-100c-428d-8ea6-35b504226f55 based on parent blueprint 6fad8fd4-e825-433f-b76d-495484e068ce blueprint source: planner with report: -planning report for blueprint 24b6e243-100c-428d-8ea6-35b504226f55: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -3363,7 +3363,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce based on parent blueprint 24b6e243-100c-428d-8ea6-35b504226f55 blueprint source: planner with report: -planning report for blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -3513,7 +3513,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 based on parent blueprint 79fff7a2-2495-4c75-8465-4dc01bab48ce blueprint source: planner with report: -planning report for blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -3664,7 +3664,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d based on parent blueprint 3bcc37b2-0c0b-44d0-b4ed-3bcb605e4312 blueprint source: planner with report: -planning report for blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -3793,7 +3793,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint e2125c83-b255-45c9-bc9b-802cff09a812 based on parent blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d blueprint source: planner with report: -planning report for blueprint e2125c83-b255-45c9-bc9b-802cff09a812: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * 
skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -3946,7 +3946,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 based on parent blueprint e2125c83-b255-45c9-bc9b-802cff09a812 blueprint source: planner with report: -planning report for blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -4100,7 +4100,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c based on parent blueprint f4a6848e-d13c-46e1-8c6a-944f886d7ba3 blueprint source: planner with report: -planning report for blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -4241,7 +4241,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 based on parent blueprint 834e4dbe-3b71-443d-bd4c-20e8253abc0c blueprint source: planner with report: -planning report for blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 8 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -4383,7 +4383,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 based on parent blueprint d9c5c5e3-c532-4c45-9ef5-22cb00f6a2e1 blueprint source: planner with report: -planning report for blueprint e2deb7c0-2262-49fe-855f-4250c22afb36: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -4517,7 +4517,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf based on parent blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 blueprint source: planner with report: -planning report for blueprint 
23ce505c-8991-44a5-8863-f2b906fba9cf: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -4654,7 +4654,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d based on parent blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf blueprint source: planner with report: -planning report for blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -4776,7 +4776,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 60b55d33-5fec-4277-9864-935197eaead7 based on parent blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d blueprint source: planner with report: -planning report for blueprint 60b55d33-5fec-4277-9864-935197eaead7: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -4918,7 +4918,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 based on parent blueprint 60b55d33-5fec-4277-9864-935197eaead7 blueprint source: planner with report: -planning report for blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -5061,7 +5061,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 based on parent blueprint aa13f40f-41ff-4b68-bee1-df2e1f805544 blueprint source: planner with report: -planning report for blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -5185,7 +5185,7 @@ INFO skipping board for 
MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a based on parent blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 blueprint source: planner with report: -planning report for blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -5319,7 +5319,7 @@ INFO ran out of boards for MGS-driven update INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: e14f91b0-0c41-48a0-919d-e5078d2b89b0 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] generated blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 based on parent blueprint 02078c95-3d58-4b7b-a03f-9b160361c50a blueprint source: planner with report: -planning report for blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -5453,7 +5453,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 based on parent blueprint e7a01ffc-6b0e-408b-917b-b1efe18b3110 blueprint source: planner with report: -planning report for blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -5600,7 +5600,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec based on parent blueprint 880e2ffc-8187-4275-a2f3-1b36aa2f4482 blueprint source: planner with report: -planning report for blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -5748,7 +5748,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint a2c6496d-98fc-444d-aa36-99508aa72367 based on parent 
blueprint c4a20bcb-1a71-4e88-97b4-36d16f55daec blueprint source: planner with report: -planning report for blueprint a2c6496d-98fc-444d-aa36-99508aa72367: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -5886,7 +5886,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 based on parent blueprint a2c6496d-98fc-444d-aa36-99508aa72367 blueprint source: planner with report: -planning report for blueprint 6ed56354-5941-40d1-a06c-b0e940701d52: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 7 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -6025,7 +6025,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 based on parent blueprint 6ed56354-5941-40d1-a06c-b0e940701d52 blueprint source: planner with report: -planning report for blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -6159,7 +6159,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 based on parent blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 blueprint source: planner with report: -planning report for blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -6281,7 +6281,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 based on parent blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 blueprint source: planner with report: -planning report for blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled 
d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -6411,7 +6411,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 based on parent blueprint 2b89e0d7-f15b-4474-8ac4-85959ed1bc88 blueprint source: planner with report: -planning report for blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts @@ -6542,7 +6542,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 59630e63-c953-4807-9e84-9e750a79f68e based on parent blueprint 7f6b7297-c2bc-4f67-b3c0-c8e555ebbdc4 blueprint source: planner with report: -planning report for blueprint 59630e63-c953-4807-9e84-9e750a79f68e: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -6665,7 +6665,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 based on parent blueprint 59630e63-c953-4807-9e84-9e750a79f68e blueprint source: planner with report: -planning report for blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -6806,7 +6806,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 90650737-8142-47a6-9a48-a10efc487e57 based on parent blueprint e93650dc-b5ba-4ec7-8550-9171c1ada194 blueprint source: planner with report: -planning report for blueprint 90650737-8142-47a6-9a48-a10efc487e57: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts @@ -6948,7 +6948,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa based on parent blueprint 90650737-8142-47a6-9a48-a10efc487e57 blueprint source: planner with report: -planning report for blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa: +planning report: * skipping 
noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -7084,7 +7084,7 @@ INFO ran out of boards for MGS-driven update INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: 9e2e0774-3cf6-4f75-9a12-92db05c77b81 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") } } }] generated blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece based on parent blueprint 2182613d-dc9f-41eb-9c6a-d33801849caa blueprint source: planner with report: -planning report for blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts @@ -7220,7 +7220,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 based on parent blueprint e8b088a8-7da0-480b-a2dc-75ffef068ece blueprint source: planner with report: -planning report for blueprint 810ea95a-4730-43dd-867e-1984aeb9d873: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -7348,7 +7348,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d based on parent blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 blueprint source: planner with report: -planning report for blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -7494,7 +7494,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e based on parent blueprint 67c074ef-c52e-4ff1-851a-598c04dc2c8d blueprint source: planner with report: -planning report for blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 7 zones are already from artifacts @@ -7643,7 +7643,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint d69e1109-06be-4469-8876-4292dc7885d7 based on parent blueprint 0a8d2f87-1d3e-4296-ba07-108940a7a57e blueprint source: planner with report: -planning report for blueprint d69e1109-06be-4469-8876-4292dc7885d7: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts @@ -7956,7 +7956,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 based on parent blueprint d69e1109-06be-4469-8876-4292dc7885d7 blueprint source: planner with report: -planning report for blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 10 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts @@ -8013,7 +8013,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 based on parent blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 blueprint source: planner with report: -planning report for blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 10 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts @@ -8036,7 +8036,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 based on parent blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 blueprint source: planner with report: -planning report for blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts @@ -8059,7 +8059,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf based on parent blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 blueprint 
source: planner with report: -planning report for blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts @@ -8085,7 +8085,7 @@ INFO skipping board for MGS-driven update, serial_number: serial2, part_number: INFO ran out of boards for MGS-driven update generated blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b based on parent blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf blueprint source: planner with report: -planning report for blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b: +planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index dc0a413b405..c3d68c0a21e 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -1573,11 +1573,11 @@ pub struct DebugLogBlueprintPlanning { pub debug_blob: serde_json::Value, } -impl TryFrom<Arc<PlanningReport>> for DebugLogBlueprintPlanning { - type Error = serde_json::Error; - - fn try_from(report: Arc<PlanningReport>) -> Result<Self, Self::Error> { - let blueprint_id = report.blueprint_id.into(); +impl DebugLogBlueprintPlanning { + pub fn new( + blueprint_id: BlueprintUuid, + report: Arc<PlanningReport>, + ) -> Result<Self, serde_json::Error> { let report = serde_json::to_value(report)?; // We explicitly _don't_ define a struct describing the format of @@ -1595,6 +1595,6 @@ impl TryFrom<Arc<PlanningReport>> for DebugLogBlueprintPlanning { "report": report, }); - Ok(Self { blueprint_id, debug_blob }) + Ok(Self { blueprint_id: blueprint_id.into(), debug_blob }) } } diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 6abb69a967e..aefa3f96eb5 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -512,7 +512,10 @@ impl DataStore { // Serialize and insert a debug log for the planning report // created with this blueprint, if we have one.
if let BlueprintSource::Planner(report) = &blueprint.source { - match DebugLogBlueprintPlanning::try_from(report.clone()) { + match DebugLogBlueprintPlanning::new( + blueprint_id, + report.clone(), + ) { Ok(debug_log) => { use nexus_db_schema::schema::debug_log_blueprint_planning::dsl; let _ = diesel::insert_into( diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 305eb0aac24..dcad6a5366e 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -283,7 +283,6 @@ impl<'a> Planner<'a> { let cockroachdb_settings = self.do_plan_cockroachdb_settings(); Ok(PlanningReport { - blueprint_id: self.blueprint.new_blueprint_id(), planner_config: *self.input.planner_config(), expunge, decommission, @@ -8054,7 +8053,6 @@ pub(crate) mod test { let BlueprintSource::Planner(report) = &blueprint.source else { panic!("unexpected source: {:?}", blueprint.source); }; - assert_eq!(report.blueprint_id, blueprint.id); eprintln!("{report}\n"); // TODO: more report testing diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index a5a5857bd79..fe3bed2dc01 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -327,7 +327,7 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd PENDING MGS-MANAGED UPDATES: 0 blueprint source: planner with report: -planning report for blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e: +planning report: planner config: add zones with mupdate override: true diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index bf96b85473b..558b3553ca3 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -515,7 +515,7 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b PENDING MGS-MANAGED UPDATES: 0 blueprint source: planner with report: -planning report for blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6: +planning report: planner config: add zones with mupdate override: true diff --git a/nexus/src/app/background/tasks/blueprint_planner.rs b/nexus/src/app/background/tasks/blueprint_planner.rs index 8434779a96f..b933ee55e8c 100644 --- a/nexus/src/app/background/tasks/blueprint_planner.rs +++ b/nexus/src/app/background/tasks/blueprint_planner.rs @@ -275,7 +275,7 @@ impl BlueprintPlanner { generating an empty planning report"; "source" => ?&blueprint.source, ); - Arc::new(PlanningReport::new(blueprint.id)) + Arc::new(PlanningReport::new()) } }; self.tx_blueprint.send_replace(Some(Arc::new((target, blueprint)))); @@ -391,10 +391,9 @@ mod test { BlueprintPlannerStatus::Targeted { parent_blueprint_id, blueprint_id, - report, + report: _, } if parent_blueprint_id == initial_blueprint.id - && blueprint_id != initial_blueprint.id - && blueprint_id == report.blueprint_id => + && blueprint_id != initial_blueprint.id => { blueprint_id } diff --git a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index 8880fd65847..f28a62441e9 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -15,7 +15,6 @@ use daft::Diffable; use 
indent_write::fmt::IndentWriter; use omicron_common::api::external::Generation; use omicron_common::policy::COCKROACHDB_REDUNDANCY; -use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::MupdateOverrideUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::PhysicalDiskUuid; @@ -51,9 +50,6 @@ use std::fmt::Write; )] #[must_use = "an unread report is not actionable"] pub struct PlanningReport { - /// The blueprint produced by the planning run this report describes. - pub blueprint_id: BlueprintUuid, - /// The configuration in effect for this planning run. pub planner_config: PlannerConfig, @@ -69,9 +65,8 @@ pub struct PlanningReport { } impl PlanningReport { - pub fn new(blueprint_id: BlueprintUuid) -> Self { + pub fn new() -> Self { Self { - blueprint_id, planner_config: PlannerConfig::default(), expunge: PlanningExpungeStepReport::new(), decommission: PlanningDecommissionStepReport::new(), @@ -101,14 +96,9 @@ impl PlanningReport { impl fmt::Display for PlanningReport { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.is_empty() { - writeln!( - f, - "empty planning report for blueprint {}.", - self.blueprint_id, - )?; + writeln!(f, "empty planning report")?; } else { let Self { - blueprint_id, planner_config, expunge, decommission, @@ -119,7 +109,7 @@ impl fmt::Display for PlanningReport { nexus_generation_bump, cockroachdb_settings, } = self; - writeln!(f, "planning report for blueprint {blueprint_id}:")?; + writeln!(f, "planning report:")?; if *planner_config != PlannerConfig::default() { writeln!(f, "planner config:\n{}", planner_config.display())?; } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 8c8f8957d77..2b1e047683a 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -3087,14 +3087,6 @@ "add": { "$ref": "#/components/schemas/PlanningAddStepReport" }, - "blueprint_id": { - "description": "The blueprint produced by the planning run this report describes.", - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" - } - ] - }, "cockroachdb_settings": { "$ref": "#/components/schemas/PlanningCockroachdbSettingsStepReport" }, @@ -3133,7 +3125,6 @@ }, "required": [ "add", - "blueprint_id", "cockroachdb_settings", "decommission", "expunge", From 3870c8e24a0d6bbf8336ee631cd1d31bf844f322 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Tue, 23 Sep 2025 15:56:13 -0700 Subject: [PATCH 06/18] [1/n] create a nexus-lockstep service (#8983) First step of #8902. It's enough work to get Nexus to stand up another HTTP service that this is worth its own PR ahead of moving APIs out of nexus-internal and into nexus-lockstep. 
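
For reviewers, a minimal caller-side sketch of how the new lockstep client might be constructed once this lands. This is not part of the patch: the function name and the hand-built URL are illustrative, and the constructor shape is assumed to match the other progenitor-generated omicron clients (a base URL plus the `slog::Logger` inner type). The port constant and the `_nexus-lockstep._tcp` SRV name are the ones added by this change.

```rust
// Hypothetical usage sketch (not in this patch): build a client for the new
// Nexus lockstep API, assuming the progenitor-generated `Client::new(baseurl,
// inner)` signature used by the other omicron clients with
// `inner_type = slog::Logger`.
use omicron_common::address::NEXUS_LOCKSTEP_PORT;
use std::net::Ipv6Addr;

fn lockstep_client(
    nexus_ip: Ipv6Addr,
    log: slog::Logger,
) -> nexus_lockstep_client::Client {
    // In a real deployment the address would come from resolving the new
    // `_nexus-lockstep._tcp` SRV records in internal DNS rather than being
    // assembled by hand here.
    let url = format!("http://[{nexus_ip}]:{NEXUS_LOCKSTEP_PORT}");
    nexus_lockstep_client::Client::new(&url, log)
}
```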
--- Cargo.lock | 30 + Cargo.toml | 6 + clients/nexus-lockstep-client/Cargo.toml | 20 + clients/nexus-lockstep-client/src/lib.rs | 24 + common/src/address.rs | 1 + dev-tools/ls-apis/api-manifest.toml | 9 + dev-tools/ls-apis/tests/api_dependencies.out | 2 + dev-tools/omicron-dev/src/main.rs | 4 + dev-tools/openapi-manager/Cargo.toml | 7 +- dev-tools/openapi-manager/src/omicron.rs | 10 + .../tests/output/cmds-example-stdout | 12 +- ...ds-expunge-newly-added-external-dns-stdout | 6 +- ...ds-expunge-newly-added-internal-dns-stdout | 6 +- .../tests/output/cmds-expunge-zones-stdout | 4 +- .../output/cmds-host-phase-2-source-stdout | 8 +- .../output/cmds-mupdate-update-flow-stdout | 20 +- .../tests/output/cmds-nexus-generation-stdout | 10 +- .../output/cmds-noop-image-source-stdout | 4 +- .../tests/output/cmds-set-mgs-updates-stdout | 12 +- .../cmds-set-remove-mupdate-override-stdout | 4 +- .../tests/output/cmds-set-zone-images-stdout | 4 +- .../tests/output/cmds-target-release-stdout | 128 +- internal-dns/types/src/config.rs | 30 + internal-dns/types/src/names.rs | 3 + nexus-config/src/nexus_config.rs | 21 + nexus-sled-agent-shared/src/inventory.rs | 21 +- nexus/Cargo.toml | 1 + nexus/db-model/src/deployment.rs | 8 + nexus/db-model/src/inventory.rs | 8 + nexus/db-model/src/schema_versions.rs | 3 +- .../db-queries/src/db/datastore/deployment.rs | 1 + .../deployment/external_networking.rs | 1 + nexus/db-queries/src/db/datastore/rack.rs | 12 + nexus/db-schema/src/schema.rs | 3 + nexus/examples/config-second.toml | 7 + nexus/examples/config.toml | 5 + nexus/lockstep-api/Cargo.toml | 13 + nexus/lockstep-api/src/lib.rs | 24 + .../src/test_util/host_phase_2_test_state.rs | 2 +- .../reconfigurator/execution/src/database.rs | 1 + nexus/reconfigurator/execution/src/dns.rs | 26 +- .../planning/src/blueprint_builder/builder.rs | 1 + .../output/planner_nonprovisionable_2_2a.txt | 1 + nexus/src/app/mod.rs | 20 + nexus/src/lib.rs | 55 +- nexus/src/lockstep_api/http_entrypoints.rs | 24 + nexus/src/lockstep_api/mod.rs | 5 + nexus/test-interface/src/lib.rs | 10 +- nexus/test-utils/src/lib.rs | 84 +- nexus/tests/config.test.toml | 4 + nexus/types/src/deployment/execution/dns.rs | 13 +- nexus/types/src/deployment/zone_type.rs | 4 + openapi/nexus-internal.json | 14 + openapi/nexus-lockstep.json | 96 + .../sled-agent/sled-agent-4.0.0-fd6727.json | 8383 +++++++++++++++++ openapi/sled-agent/sled-agent-latest.json | 2 +- schema/all-zones-requests.json | 7 + schema/crdb/dbinit.sql | 22 +- schema/crdb/nexus-lockstep-port/up01.sql | 2 + schema/crdb/nexus-lockstep-port/up02.sql | 5 + schema/crdb/nexus-lockstep-port/up03.sql | 5 + schema/crdb/nexus-lockstep-port/up04.sql | 2 + schema/crdb/nexus-lockstep-port/up05.sql | 5 + schema/crdb/nexus-lockstep-port/up06.sql | 5 + sled-agent/api/Cargo.toml | 9 +- sled-agent/api/src/lib.rs | 33 + sled-agent/api/src/v3.rs | 486 + sled-agent/src/rack_setup/plan/service.rs | 13 +- sled-agent/src/server.rs | 25 +- sled-agent/src/services.rs | 10 + sled-agent/src/sim/server.rs | 3 +- 71 files changed, 9704 insertions(+), 165 deletions(-) create mode 100644 clients/nexus-lockstep-client/Cargo.toml create mode 100644 clients/nexus-lockstep-client/src/lib.rs create mode 100644 nexus/lockstep-api/Cargo.toml create mode 100644 nexus/lockstep-api/src/lib.rs create mode 100644 nexus/src/lockstep_api/http_entrypoints.rs create mode 100644 nexus/src/lockstep_api/mod.rs create mode 100644 openapi/nexus-lockstep.json create mode 100644 openapi/sled-agent/sled-agent-4.0.0-fd6727.json create mode 100644 
schema/crdb/nexus-lockstep-port/up01.sql create mode 100644 schema/crdb/nexus-lockstep-port/up02.sql create mode 100644 schema/crdb/nexus-lockstep-port/up03.sql create mode 100644 schema/crdb/nexus-lockstep-port/up04.sql create mode 100644 schema/crdb/nexus-lockstep-port/up05.sql create mode 100644 schema/crdb/nexus-lockstep-port/up06.sql create mode 100644 sled-agent/api/src/v3.rs diff --git a/Cargo.lock b/Cargo.lock index 4514b5d78af..9c633d3b44d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6670,6 +6670,31 @@ dependencies = [ "uuid", ] +[[package]] +name = "nexus-lockstep-api" +version = "0.1.0" +dependencies = [ + "dropshot", + "nexus-types", + "omicron-workspace-hack", +] + +[[package]] +name = "nexus-lockstep-client" +version = "0.1.0" +dependencies = [ + "chrono", + "futures", + "omicron-workspace-hack", + "progenitor 0.10.0", + "regress", + "reqwest", + "schemars", + "serde", + "slog", + "uuid", +] + [[package]] name = "nexus-macros-common" version = "0.1.0" @@ -7956,6 +7981,7 @@ dependencies = [ "nexus-external-api", "nexus-internal-api", "nexus-inventory", + "nexus-lockstep-api", "nexus-metrics-producer-gc", "nexus-mgs-updates", "nexus-networking", @@ -8779,6 +8805,7 @@ dependencies = [ "newtype_derive", "nexus-external-api", "nexus-internal-api", + "nexus-lockstep-api", "ntp-admin-api", "omicron-workspace-hack", "openapi-lint", @@ -12427,8 +12454,11 @@ name = "sled-agent-api" version = "0.1.0" dependencies = [ "camino", + "chrono", "dropshot", "http", + "id-map", + "iddqd", "nexus-sled-agent-shared", "omicron-common", "omicron-uuid-kinds", diff --git a/Cargo.toml b/Cargo.toml index 91761edaa78..ae5deb44f6c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "clients/gateway-client", "clients/installinator-client", "clients/nexus-client", + "clients/nexus-lockstep-client", "clients/ntp-admin-client", "clients/oxide-client", "clients/oximeter-client", @@ -89,6 +90,7 @@ members = [ "nexus/external-api", "nexus/internal-api", "nexus/inventory", + "nexus/lockstep-api", "nexus/macros-common", "nexus/metrics-producer-gc", "nexus/mgs-updates", @@ -174,6 +176,7 @@ default-members = [ "clients/gateway-client", "clients/installinator-client", "clients/nexus-client", + "clients/nexus-lockstep-client", "clients/ntp-admin-client", "clients/oxide-client", "clients/oximeter-client", @@ -249,6 +252,7 @@ default-members = [ "nexus/external-api", "nexus/internal-api", "nexus/inventory", + "nexus/lockstep-api", "nexus/macros-common", "nexus/metrics-producer-gc", "nexus/mgs-updates", @@ -545,6 +549,8 @@ nexus-defaults = { path = "nexus/defaults" } nexus-external-api = { path = "nexus/external-api" } nexus-inventory = { path = "nexus/inventory" } nexus-internal-api = { path = "nexus/internal-api" } +nexus-lockstep-api = { path = "nexus/lockstep-api" } +nexus-lockstep-client = { path = "clients/nexus-lockstep-client" } nexus-macros-common = { path = "nexus/macros-common" } nexus-metrics-producer-gc = { path = "nexus/metrics-producer-gc" } nexus-mgs-updates = { path = "nexus/mgs-updates" } diff --git a/clients/nexus-lockstep-client/Cargo.toml b/clients/nexus-lockstep-client/Cargo.toml new file mode 100644 index 00000000000..94bf5afffe4 --- /dev/null +++ b/clients/nexus-lockstep-client/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "nexus-lockstep-client" +version = "0.1.0" +edition = "2024" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +chrono.workspace = true +futures.workspace = true +omicron-workspace-hack.workspace = true +progenitor.workspace = true 
+regress.workspace = true +reqwest.workspace = true +schemars.workspace = true +serde.workspace = true +slog.workspace = true +uuid.workspace = true diff --git a/clients/nexus-lockstep-client/src/lib.rs b/clients/nexus-lockstep-client/src/lib.rs new file mode 100644 index 00000000000..c6ce38dc372 --- /dev/null +++ b/clients/nexus-lockstep-client/src/lib.rs @@ -0,0 +1,24 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Interface for making API requests to the Oxide control plane at large from +//! callers that update in lockstep with Nexus itself (e.g. rack initialization, +//! tests and debugging) + +progenitor::generate_api!( + spec = "../../openapi/nexus-lockstep.json", + interface = Positional, + derives = [schemars::JsonSchema], + inner_type = slog::Logger, + pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { + slog::debug!(log, "client request"; + "method" => %request.method(), + "uri" => %request.url(), + "body" => ?&request.body(), + ); + }), + post_hook = (|log: &slog::Logger, result: &Result<_, _>| { + slog::debug!(log, "client response"; "result" => ?result); + }), +); diff --git a/common/src/address.rs b/common/src/address.rs index 08fc5cb9c0d..3dfcdfb8d60 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -53,6 +53,7 @@ pub const BOOTSTRAP_ARTIFACT_PORT: u16 = 12227; pub const CRUCIBLE_PANTRY_PORT: u16 = 17000; pub const TFPORTD_PORT: u16 = 12231; pub const NEXUS_INTERNAL_PORT: u16 = 12221; +pub const NEXUS_LOCKSTEP_PORT: u16 = 12232; /// The port on which Nexus exposes its external API on the underlay network. /// diff --git a/dev-tools/ls-apis/api-manifest.toml b/dev-tools/ls-apis/api-manifest.toml index 13a986bfda8..45984bb053d 100644 --- a/dev-tools/ls-apis/api-manifest.toml +++ b/dev-tools/ls-apis/api-manifest.toml @@ -339,6 +339,15 @@ server_package_name = "nexus-internal-api" versioned_how = "client" versioned_how_reason = "Circular dependencies between Nexus and other services" +[[apis]] +client_package_name = "nexus-lockstep-client" +label = "Nexus Internal Lockstep API" +server_package_name = "nexus-lockstep-api" +# nexus-lockstep-client has to be client-versioned because it's got a cyclic +# dependency with sled-agent-client, which is server-versioned. 
+versioned_how = "client" +versioned_how_reason = "Circular dependencies between Nexus and other services" + [[apis]] client_package_name = "oxide-client" label = "External API" diff --git a/dev-tools/ls-apis/tests/api_dependencies.out b/dev-tools/ls-apis/tests/api_dependencies.out index 0eed460686c..4200b285b9d 100644 --- a/dev-tools/ls-apis/tests/api_dependencies.out +++ b/dev-tools/ls-apis/tests/api_dependencies.out @@ -70,6 +70,8 @@ Nexus Internal API (client: nexus-client) consumed by: oximeter-collector (omicron/oximeter/collector) via 1 path consumed by: propolis-server (propolis/bin/propolis-server) via 3 paths +Nexus Internal Lockstep API (client: nexus-lockstep-client) + NTP Admin (client: ntp-admin-client) consumed by: omicron-nexus (omicron/nexus) via 2 paths consumed by: omicron-sled-agent (omicron/sled-agent) via 2 paths diff --git a/dev-tools/omicron-dev/src/main.rs b/dev-tools/omicron-dev/src/main.rs index 70b331011c7..1b755920bb0 100644 --- a/dev-tools/omicron-dev/src/main.rs +++ b/dev-tools/omicron-dev/src/main.rs @@ -108,6 +108,10 @@ impl RunAllArgs { "omicron-dev: nexus internal API: {:?}", cptestctx.server.get_http_server_internal_address().await, ); + println!( + "omicron-dev: nexus lockstep API: {:?}", + cptestctx.server.get_http_server_lockstep_address().await, + ); println!( "omicron-dev: cockroachdb pid: {}", cptestctx.database.pid(), diff --git a/dev-tools/openapi-manager/Cargo.toml b/dev-tools/openapi-manager/Cargo.toml index b7b6cf51f1f..75d065e7b70 100644 --- a/dev-tools/openapi-manager/Cargo.toml +++ b/dev-tools/openapi-manager/Cargo.toml @@ -18,15 +18,16 @@ cockroach-admin-api.workspace = true debug-ignore.workspace = true dns-server-api.workspace = true dropshot.workspace = true -hex.workspace = true fs-err.workspace = true gateway-api.workspace = true +hex.workspace = true indent_write.workspace = true installinator-api.workspace = true itertools.workspace = true +newtype_derive.workspace = true nexus-external-api.workspace = true nexus-internal-api.workspace = true -newtype_derive.workspace = true +nexus-lockstep-api.workspace = true ntp-admin-api.workspace = true omicron-workspace-hack.workspace = true openapi-lint.workspace = true @@ -41,9 +42,9 @@ sha2.workspace = true similar.workspace = true sled-agent-api.workspace = true slog-error-chain.workspace = true +supports-color.workspace = true textwrap.workspace = true thiserror.workspace = true -supports-color.workspace = true wicketd-api.workspace = true [dev-dependencies] diff --git a/dev-tools/openapi-manager/src/omicron.rs b/dev-tools/openapi-manager/src/omicron.rs index 15857be598f..e5c9158d462 100644 --- a/dev-tools/openapi-manager/src/omicron.rs +++ b/dev-tools/openapi-manager/src/omicron.rs @@ -13,6 +13,7 @@ use gateway_api::gateway_api_mod; use installinator_api::installinator_api_mod; use nexus_external_api::nexus_external_api_mod; use nexus_internal_api::nexus_internal_api_mod; +use nexus_lockstep_api::nexus_lockstep_api_mod; use ntp_admin_api::ntp_admin_api_mod; use oximeter_api::oximeter_api_mod; use repo_depot_api::repo_depot_api_mod; @@ -136,6 +137,15 @@ pub fn all_apis() -> Vec { ident: "nexus-internal", extra_validation: None, }, + ManagedApiConfig { + title: "Nexus lockstep API", + versions: Versions::new_lockstep(semver::Version::new(0, 0, 1)), + description: "Nexus lockstep internal API", + boundary: ApiBoundary::Internal, + api_description: nexus_lockstep_api_mod::stub_api_description, + ident: "nexus-lockstep", + extra_validation: None, + }, ManagedApiConfig { title: "NTP Admin 
API", versions: Versions::new_versioned( diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index b781a454cb2..cd6b1089043 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -644,7 +644,7 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 37 (records: 47) + unchanged names: 38 (records: 50) external DNS: DNS zone: "oxide.example" (unchanged) @@ -813,6 +813,10 @@ internal DNS: + SRV port 5353 2521ef1e-ef52-46f0-9f27-93a3b6683d92.host.control-plane.oxide.internal + SRV port 5353 7f4e4ff4-b1eb-4c47-b079-9d7fcfcf35f6.host.control-plane.oxide.internal + SRV port 5353 e50dff5f-8d7c-440b-b86a-5ca7e9117dfa.host.control-plane.oxide.internal ++ name: _nexus-lockstep._tcp (records: 3) ++ SRV port 12232 a2f67884-b0e7-498a-a005-f6686f599ca6.host.control-plane.oxide.internal ++ SRV port 12232 e34091ec-ac63-4976-b2b8-85b9223c136f.host.control-plane.oxide.internal ++ SRV port 12232 fb88d7af-16bd-4638-a9f4-ef04d0045f20.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 a2f67884-b0e7-498a-a005-f6686f599ca6.host.control-plane.oxide.internal + SRV port 12221 e34091ec-ac63-4976-b2b8-85b9223c136f.host.control-plane.oxide.internal @@ -1025,6 +1029,10 @@ internal DNS: - SRV port 5353 2521ef1e-ef52-46f0-9f27-93a3b6683d92.host.control-plane.oxide.internal - SRV port 5353 7f4e4ff4-b1eb-4c47-b079-9d7fcfcf35f6.host.control-plane.oxide.internal - SRV port 5353 e50dff5f-8d7c-440b-b86a-5ca7e9117dfa.host.control-plane.oxide.internal +- name: _nexus-lockstep._tcp (records: 3) +- SRV port 12232 a2f67884-b0e7-498a-a005-f6686f599ca6.host.control-plane.oxide.internal +- SRV port 12232 e34091ec-ac63-4976-b2b8-85b9223c136f.host.control-plane.oxide.internal +- SRV port 12232 fb88d7af-16bd-4638-a9f4-ef04d0045f20.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 a2f67884-b0e7-498a-a005-f6686f599ca6.host.control-plane.oxide.internal - SRV port 12221 e34091ec-ac63-4976-b2b8-85b9223c136f.host.control-plane.oxide.internal @@ -1968,7 +1976,7 @@ internal DNS: + SRV port 123 ac5bb28e-91d5-42f3-a57a-d84e1c414c17.host.control-plane.oxide.internal - name: e668d83e-a28c-42dc-b574-467e57403cc1.host (records: 1) - AAAA fd00:1122:3344:103::24 - unchanged names: 43 (records: 55) + unchanged names: 44 (records: 58) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index f1334083bf1..1ed80aeb5c9 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -482,7 +482,7 @@ internal DNS: - SRV port 5353 8c0a1969-15b6-4165-ba6d-a27c24151037.host.control-plane.oxide.internal + SRV port 5353 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal + SRV port 5353 8c0a1969-15b6-4165-ba6d-a27c24151037.host.control-plane.oxide.internal - unchanged names: 91 (records: 103) + unchanged names: 92 (records: 106) external DNS: * DNS zone: "oxide.example": @@ -1005,7 +1005,7 @@ internal DNS: + SRV port 5353 fe2d5287-24e3-4071-b214-2640b097a759.host.control-plane.oxide.internal + name: 
fe2d5287-24e3-4071-b214-2640b097a759.host (records: 1) + AAAA fd00:1122:3344:103::30 - unchanged names: 91 (records: 103) + unchanged names: 92 (records: 106) external DNS: * DNS zone: "oxide.example": @@ -1517,7 +1517,7 @@ internal DNS: - SRV port 5353 fe2d5287-24e3-4071-b214-2640b097a759.host.control-plane.oxide.internal + SRV port 5353 1dc649b1-2ce2-4a85-bc1f-b8a3ef23a70e.host.control-plane.oxide.internal + SRV port 5353 fe2d5287-24e3-4071-b214-2640b097a759.host.control-plane.oxide.internal - unchanged names: 91 (records: 103) + unchanged names: 92 (records: 106) external DNS: * DNS zone: "oxide.example": diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index f5b7e9bcd70..a9f4bf4fc71 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -494,7 +494,7 @@ internal DNS: + AAAA fd00:1122:3344:3::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 - unchanged names: 87 (records: 97) + unchanged names: 88 (records: 100) external DNS: DNS zone: "oxide.example" (unchanged) @@ -636,7 +636,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 91 (records: 103) + unchanged names: 92 (records: 106) external DNS: DNS zone: "oxide.example" (unchanged) @@ -826,7 +826,7 @@ internal DNS: + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 - unchanged names: 87 (records: 97) + unchanged names: 88 (records: 100) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-zones-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-zones-stdout index 888af94ffa1..cf9584026ae 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-zones-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-zones-stdout @@ -310,7 +310,7 @@ internal DNS: - AAAA fd00:1122:3344:103::26 - name: _crucible._tcp.694bd14f-cb24-4be4-bb19-876e79cda2c8 (records: 1) - SRV port 32345 694bd14f-cb24-4be4-bb19-876e79cda2c8.host.control-plane.oxide.internal - unchanged names: 49 (records: 63) + unchanged names: 50 (records: 66) external DNS: DNS zone: "oxide.example" (unchanged) @@ -478,7 +478,7 @@ internal DNS: - SRV port 32345 dfac80b4-a887-430a-ae87-a4e065dba787.host.control-plane.oxide.internal - name: dfac80b4-a887-430a-ae87-a4e065dba787.host (records: 1) - AAAA fd00:1122:3344:101::26 - unchanged names: 45 (records: 59) + unchanged names: 46 (records: 62) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout index 242e93f4be5..743dfa7d661 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout @@ -159,7 +159,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 49 (records: 59) + unchanged names: 50 (records: 62) external DNS: DNS zone: "oxide.example" (unchanged) @@ -320,7 +320,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) 
- unchanged names: 49 (records: 59) + unchanged names: 50 (records: 62) external DNS: DNS zone: "oxide.example" (unchanged) @@ -632,7 +632,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 49 (records: 59) + unchanged names: 50 (records: 62) external DNS: DNS zone: "oxide.example" (unchanged) @@ -793,7 +793,7 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 49 (records: 59) + unchanged names: 50 (records: 62) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 47bd97b5e97..1b7ce320057 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -712,7 +712,7 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 39 (records: 53) + unchanged names: 40 (records: 56) external DNS: DNS zone: "oxide.example" (unchanged) @@ -822,7 +822,7 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 38 (records: 51) + unchanged names: 39 (records: 54) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1065,7 +1065,7 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 39 (records: 53) + unchanged names: 40 (records: 56) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1376,7 +1376,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 39 (records: 53) + unchanged names: 40 (records: 56) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1708,7 +1708,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 39 (records: 53) + unchanged names: 40 (records: 56) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1771,7 +1771,7 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 39 (records: 53) + unchanged names: 40 (records: 56) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2091,7 +2091,7 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 39 (records: 53) + unchanged names: 40 (records: 56) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2365,7 +2365,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 39 (records: 53) + unchanged names: 40 (records: 56) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2523,7 +2523,7 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 40 (records: 55) + unchanged names: 41 (records: 58) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2665,7 +2665,7 @@ internal DNS: + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 
6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - unchanged names: 39 (records: 52) + unchanged names: 40 (records: 55) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout index f3e9e300ecd..b1f813834a2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout @@ -120,6 +120,14 @@ internal DNS: * DNS zone: "control-plane.oxide.internal": + name: 16a766ee-9400-4e67-9363-883670371a1b.host (records: 1) + AAAA fd00:1122:3344:101::28 +* name: _nexus-lockstep._tcp (records: 3 -> 4) +- SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal ++ SRV port 12232 16a766ee-9400-4e67-9363-883670371a1b.host.control-plane.oxide.internal ++ SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal * name: _nexus._tcp (records: 3 -> 4) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal @@ -170,7 +178,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 52 (records: 67) + unchanged names: 53 (records: 71) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index 1de42a66bb2..203e65f87ab 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -392,7 +392,7 @@ internal DNS: + SRV port 123 e8fe709c-725f-4bb2-b714-ffcda13a9e54.host.control-plane.oxide.internal - name: db288a1e-c33c-44ca-8c79-9a8978afa34d.host (records: 1) - AAAA fd00:1122:3344:106::21 - unchanged names: 50 (records: 65) + unchanged names: 51 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -510,7 +510,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 52 (records: 73) + unchanged names: 53 (records: 76) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout index 8b62db5ca46..e7abda89b69 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout @@ -460,7 +460,7 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -498,7 +498,7 @@ to: blueprint 
ad97e762-7bf1-45a6-a98f-60afb7e491c0 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -755,7 +755,7 @@ to: blueprint 5bf974f3-81f9-455b-b24e-3099f765664c internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -794,7 +794,7 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1053,7 +1053,7 @@ to: blueprint 1b837a27-3be1-4fcb-8499-a921c839e1d0 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1308,7 +1308,7 @@ to: blueprint 3682a71b-c6ca-4b7e-8f84-16df80c85960 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout index 14510bfde28..d30341edef7 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout @@ -666,7 +666,7 @@ internal DNS: - AAAA fd00:1122:3344:107::22 - name: c800ba17-240e-4b72-8ae6-afc30b6baa96.host (records: 1) - AAAA fd00:1122:3344:107::21 - unchanged names: 51 (records: 67) + unchanged names: 52 (records: 70) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1015,7 +1015,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 52 (records: 73) + unchanged names: 53 (records: 76) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout index 170a184c2a2..049bfaa855e 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout @@ -340,7 +340,7 @@ to: blueprint f714e6ea-e85a-4d7d-93c2-a018744fe176 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 35 (records: 45) + unchanged names: 36 (records: 48) external DNS: DNS zone: "oxide.example" (unchanged) @@ -572,7 +572,7 @@ to: blueprint d9c572a1-a68c-4945-b1ec-5389bd588fe9 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 35 (records: 45) + unchanged names: 36 (records: 48) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 08eaa6fd97a..b89bbc53fb2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -385,7 +385,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS: DNS zone: "control-plane.oxide.internal" 
(unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1154,7 +1154,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1210,7 +1210,7 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1285,7 +1285,7 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1360,7 +1360,7 @@ to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1497,7 +1497,7 @@ to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1552,7 +1552,7 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1617,7 +1617,7 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1681,7 +1681,7 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1756,7 +1756,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1833,7 +1833,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1905,7 +1905,7 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -1981,7 +1981,7 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2053,7 +2053,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: 
"oxide.example" (unchanged) @@ -2128,7 +2128,7 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2265,7 +2265,7 @@ to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2341,7 +2341,7 @@ to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2419,7 +2419,7 @@ to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2490,7 +2490,7 @@ to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2565,7 +2565,7 @@ to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2642,7 +2642,7 @@ to: blueprint 778e3f3a-58b1-4a5e-acff-d23c5d7124c2 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2716,7 +2716,7 @@ to: blueprint 386a7ec3-7c2e-43cf-8f00-999e91e1d5e6 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2788,7 +2788,7 @@ to: blueprint e54a0836-53e1-4948-a3af-0b77165289b5 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -2920,7 +2920,7 @@ to: blueprint 459a45a5-616e-421f-873b-2fb08c36205c internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -3059,7 +3059,7 @@ to: blueprint b2295597-5788-482e-acf9-1731ec63fbd2 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -3194,7 +3194,7 @@ internal DNS: - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -3335,7 +3335,7 @@ internal DNS: + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: ba87399e-e9b7-4ee4-8cb7-0032822630e9.host (records: 1) + AAAA 
fd00:1122:3344:102::29 - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -3471,7 +3471,7 @@ internal DNS: - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: * DNS zone: "oxide.example": @@ -3622,7 +3622,7 @@ internal DNS: + SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: * DNS zone: "oxide.example": @@ -3765,7 +3765,7 @@ to: blueprint 4d2eb6f3-7eb1-443a-8e76-7ecf05da2f6d internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -3918,7 +3918,7 @@ internal DNS: + AAAA fd00:1122:3344:3::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 - unchanged names: 45 (records: 55) + unchanged names: 46 (records: 58) external DNS: DNS zone: "oxide.example" (unchanged) @@ -4072,7 +4072,7 @@ internal DNS: + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 - unchanged names: 45 (records: 55) + unchanged names: 46 (records: 58) external DNS: DNS zone: "oxide.example" (unchanged) @@ -4213,7 +4213,7 @@ internal DNS: + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) - AAAA fd00:1122:3344:102::25 - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -4355,7 +4355,7 @@ internal DNS: + SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -4489,7 +4489,7 @@ to: blueprint e2deb7c0-2262-49fe-855f-4250c22afb36 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -4623,7 +4623,7 @@ to: blueprint 23ce505c-8991-44a5-8863-f2b906fba9cf internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -4747,7 +4747,7 @@ to: blueprint c0d81ea6-909c-4efb-964e-beff67f6da0d internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -4890,7 +4890,7 @@ internal DNS: + AAAA fd00:1122:3344:3::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 - unchanged names: 46 (records: 56) + unchanged names: 47 (records: 59) external DNS: DNS zone: "oxide.example" (unchanged) @@ -5033,7 +5033,7 @@ internal DNS: + AAAA 
fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 - unchanged names: 46 (records: 56) + unchanged names: 47 (records: 59) external DNS: DNS zone: "oxide.example" (unchanged) @@ -5157,7 +5157,7 @@ to: blueprint 316ccd9e-5c53-46c3-a2e9-20c3867b7111 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -5290,7 +5290,7 @@ internal DNS: - SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -5425,7 +5425,7 @@ internal DNS: + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: e14f91b0-0c41-48a0-919d-e5078d2b89b0.host (records: 1) + AAAA fd00:1122:3344:101::28 - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -5561,7 +5561,7 @@ internal DNS: - SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: * DNS zone: "oxide.example": @@ -5709,7 +5709,7 @@ internal DNS: + SRV port 5353 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal + SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: * DNS zone: "oxide.example": @@ -5858,7 +5858,7 @@ internal DNS: + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal - name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) - AAAA fd00:1122:3344:101::24 - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -5997,7 +5997,7 @@ internal DNS: + SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -6128,7 +6128,7 @@ to: blueprint 9078c4ba-3a73-4b3f-ac2c-acb501f89cb2 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -6252,7 +6252,7 @@ to: blueprint 8763abc1-8a42-4932-b5a7-33109e0e0152 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -6383,7 +6383,7 @@ internal DNS: - SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal + SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal + SRV 
port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -6514,7 +6514,7 @@ internal DNS: + SRV port 17000 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e.host.control-plane.oxide.internal + SRV port 17000 7e83b92d-6b02-47f3-a5ab-125a6bb44e29.host.control-plane.oxide.internal + SRV port 17000 9464c6ed-ffa6-4e88-ae4e-76551d82b2af.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -6637,7 +6637,7 @@ to: blueprint 59630e63-c953-4807-9e84-9e750a79f68e internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -6778,7 +6778,7 @@ internal DNS: - AAAA fd00:1122:3344:3::1 - name: ns3 (records: 1) - AAAA fd00:1122:3344:3::1 - unchanged names: 47 (records: 57) + unchanged names: 48 (records: 60) external DNS: DNS zone: "oxide.example" (unchanged) @@ -6920,7 +6920,7 @@ internal DNS: + AAAA fd00:1122:3344:3::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 - unchanged names: 47 (records: 57) + unchanged names: 48 (records: 60) external DNS: DNS zone: "oxide.example" (unchanged) @@ -7055,7 +7055,7 @@ internal DNS: + SRV port 123 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal - name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) - AAAA fd00:1122:3344:103::21 - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -7192,7 +7192,7 @@ internal DNS: + SRV port 123 9e2e0774-3cf6-4f75-9a12-92db05c77b81.host.control-plane.oxide.internal + SRV port 123 ba87399e-e9b7-4ee4-8cb7-0032822630e9.host.control-plane.oxide.internal + SRV port 123 e14f91b0-0c41-48a0-919d-e5078d2b89b0.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: DNS zone: "oxide.example" (unchanged) @@ -7320,7 +7320,7 @@ to: blueprint 810ea95a-4730-43dd-867e-1984aeb9d873 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -7458,7 +7458,7 @@ internal DNS: + SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal - name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) - AAAA fd00:1122:3344:103::23 - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: * DNS zone: "oxide.example": @@ -7605,7 +7605,7 @@ internal DNS: + SRV port 5353 26bdd109-c842-43a9-95cb-15aba9b0832b.host.control-plane.oxide.internal + SRV port 5353 4ab0ec67-b27e-42b5-af22-9117ad11113b.host.control-plane.oxide.internal + SRV port 5353 63ff80e3-ef8d-4186-8240-5ebd3f7d4d82.host.control-plane.oxide.internal - unchanged names: 49 (records: 61) + unchanged names: 50 (records: 64) external DNS: * DNS zone: "oxide.example": @@ -7896,6 +7896,16 @@ internal DNS: + AAAA fd00:1122:3344:103::2b + name: 9ae90740-7fdb-4073-ae43-048f2fca3d69.host (records: 1) + AAAA fd00:1122:3344:102::2c +* name: _nexus-lockstep._tcp (records: 3 -> 6) +- SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12232 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal ++ SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12232 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal ++ SRV port 12232 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal ++ SRV port 12232 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal * name: _nexus._tcp (records: 3 -> 6) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal @@ -7990,7 +8000,7 @@ to: blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 54 (records: 71) + unchanged names: 55 (records: 77) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/internal-dns/types/src/config.rs b/internal-dns/types/src/config.rs index a1c9c510442..961d4fac554 100644 --- a/internal-dns/types/src/config.rs +++ b/internal-dns/types/src/config.rs @@ -410,6 +410,32 @@ impl DnsConfigBuilder { self.service_backend_zone(ServiceName::Mgd, &zone, mgd_port) } + /// Higher-level shorthand for adding a Nexus zone with both its internal + /// API service and its lockstep API service. + /// + /// # Errors + /// + /// This function fails only if the given zone has already been added to the + /// configuration. + pub fn host_zone_nexus( + &mut self, + zone_id: OmicronZoneUuid, + internal_address: SocketAddrV6, + lockstep_port: u16, + ) -> anyhow::Result<()> { + let zone = self.host_zone(zone_id, *internal_address.ip())?; + self.service_backend_zone( + ServiceName::Nexus, + &zone, + internal_address.port(), + )?; + self.service_backend_zone( + ServiceName::NexusLockstep, + &zone, + lockstep_port, + ) + } + /// Higher-level shorthand for adding a ClickHouse single node zone with /// several services. /// @@ -743,6 +769,10 @@ mod test { assert_eq!(ServiceName::Cockroach.dns_name(), "_cockroach._tcp",); assert_eq!(ServiceName::InternalDns.dns_name(), "_nameservice._tcp",); assert_eq!(ServiceName::Nexus.dns_name(), "_nexus._tcp",); + assert_eq!( + ServiceName::NexusLockstep.dns_name(), + "_nexus-lockstep._tcp", + ); assert_eq!(ServiceName::Oximeter.dns_name(), "_oximeter._tcp",); assert_eq!( ServiceName::OximeterReader.dns_name(), diff --git a/internal-dns/types/src/names.rs b/internal-dns/types/src/names.rs index ac314009d28..500511954e4 100644 --- a/internal-dns/types/src/names.rs +++ b/internal-dns/types/src/names.rs @@ -58,6 +58,7 @@ pub enum ServiceName { InternalDns, ExternalDns, Nexus, + NexusLockstep, Oximeter, /// Determines whether to read from a replicated cluster or single-node /// ClickHouse installation. 
@@ -93,6 +94,7 @@ impl ServiceName { ServiceName::ExternalDns => "external-dns", ServiceName::InternalDns => "nameservice", ServiceName::Nexus => "nexus", + ServiceName::NexusLockstep => "nexus-lockstep", ServiceName::Oximeter => "oximeter", ServiceName::OximeterReader => "oximeter-reader", ServiceName::ManagementGatewayService => "mgs", @@ -126,6 +128,7 @@ impl ServiceName { | ServiceName::InternalDns | ServiceName::ExternalDns | ServiceName::Nexus + | ServiceName::NexusLockstep | ServiceName::Oximeter | ServiceName::OximeterReader | ServiceName::ManagementGatewayService diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs index 3948d841c2d..a91d98dcaa8 100644 --- a/nexus-config/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -174,6 +174,9 @@ pub struct DeploymentConfig { /// Dropshot configuration for internal API server. #[schemars(skip)] // TODO we're protected against dropshot changes pub dropshot_internal: ConfigDropshot, + /// Dropshot configuration for lockstep API server. + #[schemars(skip)] // TODO we're protected against dropshot changes + pub dropshot_lockstep: ConfigDropshot, /// Describes how Nexus should find internal DNS servers /// for bootstrapping. pub internal_dns: InternalDns, @@ -1056,6 +1059,9 @@ mod test { [deployment.dropshot_internal] bind_address = "10.1.2.3:4568" default_request_body_max_bytes = 1024 + [deployment.dropshot_lockstep] + bind_address = "10.1.2.3:4569" + default_request_body_max_bytes = 1024 [deployment.internal_dns] type = "from_subnet" subnet.net = "::/56" @@ -1152,6 +1158,12 @@ mod test { .unwrap(), ..Default::default() }, + dropshot_lockstep: ConfigDropshot { + bind_address: "10.1.2.3:4569" + .parse::() + .unwrap(), + ..Default::default() + }, internal_dns: InternalDns::FromSubnet { subnet: Ipv6Subnet::::new( Ipv6Addr::LOCALHOST @@ -1383,6 +1395,9 @@ mod test { [deployment.dropshot_internal] bind_address = "10.1.2.3:4568" default_request_body_max_bytes = 1024 + [deployment.dropshot_lockstep] + bind_address = "10.1.2.3:4569" + default_request_body_max_bytes = 1024 [deployment.internal_dns] type = "from_subnet" subnet.net = "::/56" @@ -1480,6 +1495,9 @@ mod test { [deployment.dropshot_internal] bind_address = "10.1.2.3:4568" default_request_body_max_bytes = 1024 + [deployment.dropshot_lockstep] + bind_address = "10.1.2.3:4569" + default_request_body_max_bytes = 1024 [deployment.internal_dns] type = "from_subnet" subnet.net = "::/56" @@ -1534,6 +1552,9 @@ mod test { [deployment.dropshot_internal] bind_address = "10.1.2.3:4568" default_request_body_max_bytes = 1024 + [deployment.dropshot_lockstep] + bind_address = "10.1.2.3:4568" + default_request_body_max_bytes = 1024 [deployment.internal_dns] type = "from_subnet" subnet.net = "::/56" diff --git a/nexus-sled-agent-shared/src/inventory.rs b/nexus-sled-agent-shared/src/inventory.rs index 283f136d768..ab6ff1ee139 100644 --- a/nexus-sled-agent-shared/src/inventory.rs +++ b/nexus-sled-agent-shared/src/inventory.rs @@ -1103,7 +1103,7 @@ pub struct OmicronZoneConfig { pub zone_type: OmicronZoneType, // Use `InstallDataset` if this field is not present in a deserialized // blueprint or ledger. - #[serde(default = "deserialize_image_source_default")] + #[serde(default = "OmicronZoneImageSource::deserialize_default")] pub image_source: OmicronZoneImageSource, } @@ -1235,6 +1235,10 @@ pub enum OmicronZoneType { Nexus { /// The address at which the internal nexus server is reachable. 
internal_address: SocketAddrV6, + /// The port at which the internal lockstep server is reachable. This + /// shares the same IP address with `internal_address`. + #[serde(default = "default_nexus_lockstep_port")] + lockstep_port: u16, /// The address at which the external nexus server is reachable. external_ip: IpAddr, /// The service vNIC providing external connectivity using OPTE. @@ -1462,6 +1466,10 @@ impl OmicronZoneType { } } +fn default_nexus_lockstep_port() -> u16 { + omicron_common::address::NEXUS_LOCKSTEP_PORT +} + /// Like [`OmicronZoneType`], but without any associated data. /// /// This enum is meant to correspond exactly 1:1 with `OmicronZoneType`. @@ -1735,12 +1743,13 @@ impl OmicronZoneImageSource { None } } -} -// See `OmicronZoneConfig`. This is a separate function instead of being `impl -// Default` because we don't want to accidentally use this default in Rust code. -fn deserialize_image_source_default() -> OmicronZoneImageSource { - OmicronZoneImageSource::InstallDataset + // See `OmicronZoneConfig`. This is a separate function instead of being + // `impl Default` because we don't want to accidentally use this default + // outside of `serde(default)`. + pub fn deserialize_default() -> Self { + OmicronZoneImageSource::InstallDataset + } } #[cfg(test)] diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 70404d1a842..eb509439521 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -61,6 +61,7 @@ nexus-background-task-interface.workspace = true nexus-config.workspace = true nexus-external-api.workspace = true nexus-internal-api.workspace = true +nexus-lockstep-api.workspace = true nexus-mgs-updates.workspace = true nexus-networking.workspace = true nexus-saga-recovery.workspace = true diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index c3d68c0a21e..37b1a1de795 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -574,6 +574,7 @@ pub struct BpOmicronZone { pub image_source: DbBpZoneImageSource, pub image_artifact_sha256: Option, pub nexus_generation: Option, + pub nexus_lockstep_port: Option, } impl BpOmicronZone { @@ -636,6 +637,7 @@ impl BpOmicronZone { snat_first_port: None, snat_last_port: None, nexus_generation: None, + nexus_lockstep_port: None, }; match &blueprint_zone.zone_type { @@ -763,6 +765,7 @@ impl BpOmicronZone { } BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { internal_address, + lockstep_port, external_ip, nic, external_tls, @@ -777,6 +780,8 @@ impl BpOmicronZone { bp_omicron_zone.bp_nic_id = Some(nic.id); bp_omicron_zone.second_service_ip = Some(IpNetwork::from(external_ip.ip)); + bp_omicron_zone.nexus_lockstep_port = + Some(SqlU16::from(*lockstep_port)); bp_omicron_zone.nexus_external_tls = Some(*external_tls); bp_omicron_zone.nexus_external_dns_servers = Some( external_dns_servers @@ -971,6 +976,9 @@ impl BpOmicronZone { ZoneType::Nexus => { BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { internal_address: primary_address, + lockstep_port: *self.nexus_lockstep_port.ok_or_else( + || anyhow!("expected 'nexus_lockstep_port'"), + )?, external_ip: OmicronZoneExternalFloatingIp { id: external_ip_id?, ip: self diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index d496deac679..fb216ac6ba8 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -2410,6 +2410,7 @@ pub struct InvOmicronSledConfigZone { pub filesystem_pool: Option>, pub image_source: InvZoneImageSource, pub image_artifact_sha256: Option, + pub 
nexus_lockstep_port: Option, } impl InvOmicronSledConfigZone { @@ -2463,6 +2464,7 @@ impl InvOmicronSledConfigZone { snat_last_port: None, image_source, image_artifact_sha256, + nexus_lockstep_port: None, }; match &zone.zone_type { @@ -2568,6 +2570,7 @@ impl InvOmicronSledConfigZone { } OmicronZoneType::Nexus { internal_address, + lockstep_port, external_ip, nic, external_tls, @@ -2589,6 +2592,8 @@ impl InvOmicronSledConfigZone { .map(IpNetwork::from) .collect(), ); + inv_omicron_zone.nexus_lockstep_port = + Some(SqlU16::from(*lockstep_port)); } OmicronZoneType::Oximeter { address } => { // Set the common fields @@ -2730,6 +2735,9 @@ impl InvOmicronSledConfigZone { } ZoneType::Nexus => OmicronZoneType::Nexus { internal_address: primary_address, + lockstep_port: *self + .nexus_lockstep_port + .ok_or_else(|| anyhow!("expected 'nexus_lockstep_port'"))?, external_ip: self .second_service_ip .ok_or_else(|| anyhow!("expected second service IP"))? diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index da2e76e8046..7b5281a7a60 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(192, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(193, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(193, "nexus-lockstep-port"), KnownVersion::new(192, "blueprint-source"), KnownVersion::new(191, "debug-log-blueprint-planner"), KnownVersion::new(190, "add-instance-cpu-platform"), diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index aefa3f96eb5..4cae278a6bd 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -4233,6 +4233,7 @@ mod tests { 0, 0, ), + lockstep_port: 0, external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), ip: "10.0.0.1".parse().unwrap(), diff --git a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs index dd525fdbbc9..b21ce19ad99 100644 --- a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs +++ b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs @@ -640,6 +640,7 @@ mod tests { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address: "[::1]:0".parse().unwrap(), + lockstep_port: 0, external_ip: self.nexus_external_ip, nic: self.nexus_nic.clone(), external_tls: false, diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index b6437e0e022..e4e559ce755 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -1506,6 +1506,8 @@ mod test { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address: "[::1]:80".parse().unwrap(), + lockstep_port: + omicron_common::address::NEXUS_LOCKSTEP_PORT, external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), ip: nexus_ip, @@ -1764,6 
+1766,8 @@ mod test { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address: "[::1]:80".parse().unwrap(), + lockstep_port: + omicron_common::address::NEXUS_LOCKSTEP_PORT, external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), ip: nexus_ip_start.into(), @@ -1796,6 +1800,8 @@ mod test { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address: "[::1]:80".parse().unwrap(), + lockstep_port: + omicron_common::address::NEXUS_LOCKSTEP_PORT, external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), ip: nexus_ip_end.into(), @@ -2048,6 +2054,8 @@ mod test { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address: "[::1]:80".parse().unwrap(), + lockstep_port: + omicron_common::address::NEXUS_LOCKSTEP_PORT, external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), ip: nexus_ip_start.into(), @@ -2272,6 +2280,8 @@ mod test { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address: "[::1]:80".parse().unwrap(), + lockstep_port: + omicron_common::address::NEXUS_LOCKSTEP_PORT, external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), ip: nexus_ip, @@ -2412,6 +2422,8 @@ mod test { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address: "[::1]:80".parse().unwrap(), + lockstep_port: + omicron_common::address::NEXUS_LOCKSTEP_PORT, external_ip: OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), ip, diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 6425103fe24..777c4cb2dbb 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -1869,6 +1869,8 @@ table! { image_source -> crate::enums::InvZoneImageSourceEnum, image_artifact_sha256 -> Nullable, + + nexus_lockstep_port -> Nullable, } } @@ -2086,6 +2088,7 @@ table! { image_source -> crate::enums::BpZoneImageSourceEnum, image_artifact_sha256 -> Nullable, nexus_generation -> Nullable, + nexus_lockstep_port -> Nullable, } } diff --git a/nexus/examples/config-second.toml b/nexus/examples/config-second.toml index 5abc8235e0b..3bf8b526ad7 100644 --- a/nexus/examples/config-second.toml +++ b/nexus/examples/config-second.toml @@ -69,6 +69,13 @@ default_request_body_max_bytes = 1048576 bind_address = "[::1]:12223" default_request_body_max_bytes = 1048576 +[deployment.dropshot_lockstep] +# IP Address and TCP port on which to listen for the lockstep API +# This config file uses 12233 to avoid colliding with the usual 12232 that's +# used by `omicron-dev run-all` +bind_address = "[::1]:12233" +default_request_body_max_bytes = 1048576 + #[deployment.internal_dns] ## These values are overridden at the bottom of this file. #type = "from_address" diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index 34517500176..80fa495baad 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -53,6 +53,11 @@ default_request_body_max_bytes = 1048576 bind_address = "[::1]:12221" default_request_body_max_bytes = 1048576 +[deployment.dropshot_lockstep] +# IP Address and TCP port on which to listen for the lockstep API +bind_address = "[::1]:12232" +default_request_body_max_bytes = 1048576 + [deployment.internal_dns] # Example address. 
# If you're using `omicron-dev run-all`, this is value is overwritten diff --git a/nexus/lockstep-api/Cargo.toml b/nexus/lockstep-api/Cargo.toml new file mode 100644 index 00000000000..06ce4b373bf --- /dev/null +++ b/nexus/lockstep-api/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "nexus-lockstep-api" +version = "0.1.0" +edition = "2024" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +dropshot.workspace = true +nexus-types.workspace = true +omicron-workspace-hack.workspace = true diff --git a/nexus/lockstep-api/src/lib.rs b/nexus/lockstep-api/src/lib.rs new file mode 100644 index 00000000000..5d41943a1c4 --- /dev/null +++ b/nexus/lockstep-api/src/lib.rs @@ -0,0 +1,24 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use dropshot::{HttpError, HttpResponseOk, RequestContext}; +use nexus_types::external_api::views::{Ping, PingStatus}; + +#[dropshot::api_description] +pub trait NexusLockstepApi { + type Context; + + /// Ping API + /// + /// Always responds with Ok if it responds at all. + #[endpoint { + method = GET, + path = "/v1/ping", + }] + async fn ping( + _rqctx: RequestContext, + ) -> Result, HttpError> { + Ok(HttpResponseOk(Ping { status: PingStatus::Ok })) + } +} diff --git a/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs b/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs index bdb459319bb..b7b1bbd70c9 100644 --- a/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs +++ b/nexus/mgs-updates/src/test_util/host_phase_2_test_state.rs @@ -127,7 +127,7 @@ impl HostPhase2TestContext { .version_policy(dropshot::VersionPolicy::Dynamic(Box::new( dropshot::ClientSpecifiesVersionInHeader::new( omicron_common::api::VERSION_HEADER, - sled_agent_api::VERSION_ADD_SWITCH_ZONE_OPERATOR_POLICY, + sled_agent_api::VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY, ), ))) .start() diff --git a/nexus/reconfigurator/execution/src/database.rs b/nexus/reconfigurator/execution/src/database.rs index b27c7cb224e..e4d897b6bdb 100644 --- a/nexus/reconfigurator/execution/src/database.rs +++ b/nexus/reconfigurator/execution/src/database.rs @@ -118,6 +118,7 @@ mod test { filesystem_pool: ZpoolName::new_external(ZpoolUuid::new_v4()), zone_type: BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { internal_address: "[::1]:0".parse().unwrap(), + lockstep_port: 0, external_dns_servers: Vec::new(), external_ip: nexus_types::deployment::OmicronZoneExternalFloatingIp { id: ExternalIpUuid::new_v4(), diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 4af5551d8f2..9fe8119937c 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -580,6 +580,7 @@ mod test { ) } OmicronZoneType::Nexus { + lockstep_port, external_dns_servers, external_ip, external_tls, @@ -591,6 +592,7 @@ mod test { )?; BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { internal_address, + lockstep_port, external_ip: OmicronZoneExternalFloatingIp { id: external_ip_id, ip: external_ip, @@ -990,6 +992,7 @@ mod test { ServiceName::InternalDns, ServiceName::ExternalDns, ServiceName::Nexus, + ServiceName::NexusLockstep, ServiceName::Oximeter, ServiceName::Dendrite, ServiceName::CruciblePantry, @@ -1642,17 +1645,20 @@ mod test { // Nothing was removed. 
assert!(diff.names_removed().next().is_none()); - // The SRV record for Nexus itself ought to have changed, growing one - // more record -- for the new AAAA record above. + // The SRV records for both nexus (internal) and nexus-lockstep ought + // to have changed, growing one more record -- for the new AAAA record + // above. let changed: Vec<_> = diff.names_changed().collect(); - assert_eq!(changed.len(), 1); - let (name, old_records, new_records) = changed[0]; - assert_eq!(name, ServiceName::Nexus.dns_name()); - let new_srv = subset_plus_one(old_records, new_records); - let DnsRecord::Srv(new_srv) = new_srv else { - panic!("expected SRV record, found {:?}", new_srv); - }; - assert_eq!(new_srv.target, new_zone_host.fqdn()); + assert_eq!(changed.len(), 2); + assert_eq!(changed[0].0, ServiceName::NexusLockstep.dns_name()); + assert_eq!(changed[1].0, ServiceName::Nexus.dns_name()); + for (_, old_records, new_records) in changed { + let new_srv = subset_plus_one(old_records, new_records); + let DnsRecord::Srv(new_srv) = new_srv else { + panic!("expected SRV record, found {:?}", new_srv); + }; + assert_eq!(new_srv.target, new_zone_host.fqdn()); + } // As for external DNS: all existing names ought to have been changed, // gaining a new A record for the new host. diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 8c86a313423..b76a6b7fbdd 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -1643,6 +1643,7 @@ impl<'a> BlueprintBuilder<'a> { let internal_address = SocketAddrV6::new(ip, port, 0, 0); let zone_type = BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { internal_address, + lockstep_port: omicron_common::address::NEXUS_LOCKSTEP_PORT, external_ip, nic, external_tls, diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index 162e4c1ad69..52d37033a2f 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -309,6 +309,7 @@ ZONE ERRORS: mismatched zone type: after: Nexus( Nexus { internal_address: [fd01:1122:3344:105::22]:12221, + lockstep_port: 12232, external_ip: OmicronZoneExternalFloatingIp { id: 6ebcade9-3a69-465e-99e9-6bf8eb9a8390 (external_ip), ip: 192.0.2.2, diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 04b09c5934a..0f33f470873 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -193,6 +193,9 @@ pub struct Nexus { /// Internal dropshot server internal_server: std::sync::Mutex>, + /// Lockstep dropshot server + lockstep_server: std::sync::Mutex>, + /// Status of background task to populate database populate_status: watch::Receiver, @@ -488,6 +491,7 @@ impl Nexus { external_server: std::sync::Mutex::new(None), techport_external_server: std::sync::Mutex::new(None), internal_server: std::sync::Mutex::new(None), + lockstep_server: std::sync::Mutex::new(None), producer_server: std::sync::Mutex::new(None), populate_status, reqwest_client, @@ -705,6 +709,7 @@ impl Nexus { external_server: DropshotServer, techport_external_server: DropshotServer, internal_server: DropshotServer, + lockstep_server: DropshotServer, producer_server: ProducerServer, ) { // If any servers already exist, close them. 
@@ -717,6 +722,7 @@ impl Nexus { .unwrap() .replace(techport_external_server); self.internal_server.lock().unwrap().replace(internal_server); + self.lockstep_server.lock().unwrap().replace(lockstep_server); self.producer_server.lock().unwrap().replace(producer_server); } @@ -763,6 +769,10 @@ impl Nexus { if let Some(server) = internal_server { extend_err(&mut res, server.close().await); } + let lockstep_server = self.lockstep_server.lock().unwrap().take(); + if let Some(server) = lockstep_server { + extend_err(&mut res, server.close().await); + } let producer_server = self.producer_server.lock().unwrap().take(); if let Some(server) = producer_server { extend_err( @@ -824,6 +834,16 @@ impl Nexus { .map(|server| server.local_addr()) } + pub(crate) async fn get_lockstep_server_address( + &self, + ) -> Option { + self.lockstep_server + .lock() + .unwrap() + .as_ref() + .map(|server| server.local_addr()) + } + /// Returns an [`OpContext`] used for authenticating external requests pub fn opctx_external_authn(&self) -> &OpContext { &self.opctx_external_authn diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index b0968e9b90f..eddb0520761 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -13,6 +13,7 @@ mod cidata; mod context; // Public for documentation examples pub mod external_api; // Public for testing mod internal_api; +mod lockstep_api; mod populate; mod saga_interface; @@ -23,6 +24,7 @@ use context::ServerContext; use dropshot::ConfigDropshot; use external_api::http_entrypoints::external_api; use internal_api::http_entrypoints::internal_api; +use lockstep_api::http_entrypoints::lockstep_api; use nexus_config::NexusConfig; use nexus_db_model::RendezvousDebugDataset; use nexus_db_queries::db; @@ -61,6 +63,8 @@ pub struct InternalServer { apictx: ApiContext, /// dropshot server for internal API http_server_internal: dropshot::HttpServer, + /// dropshot server for lockstep API + http_server_lockstep: dropshot::HttpServer, config: NexusConfig, log: Logger, } @@ -83,6 +87,17 @@ impl InternalServer { ) .await?; + if config.deployment.dropshot_internal.bind_address.ip() + != config.deployment.dropshot_lockstep.bind_address.ip() + { + return Err(format!( + "internal server IP ({}) does not equal \ + lockstep server IP ({})", + config.deployment.dropshot_internal.bind_address.ip(), + config.deployment.dropshot_lockstep.bind_address.ip() + )); + } + // Launch the internal server. let http_server_internal = match dropshot::ServerBuilder::new( internal_api(), @@ -100,15 +115,44 @@ impl InternalServer { } }; + // Launch the lockstep server. This is launched at the same time as the + // internal server, before all the other servers. 
+ let http_server_lockstep = match dropshot::ServerBuilder::new( + lockstep_api(), + context.clone(), + log.new(o!("component" => "dropshot_lockstep")), + ) + .config(config.deployment.dropshot_lockstep.clone()) + .start() + .map_err(|error| format!("initializing lockstep server: {}", error)) + { + Ok(server) => server, + Err(err) => { + context.context.nexus.datastore().terminate().await; + return Err(err); + } + }; + Ok(Self { apictx: context, http_server_internal, + http_server_lockstep, config: config.clone(), log, }) } } +impl nexus_test_interface::InternalServer for InternalServer { + fn get_http_server_internal_address(&self) -> SocketAddr { + self.http_server_internal.local_addr() + } + + fn get_http_server_lockstep_address(&self) -> SocketAddr { + self.http_server_lockstep.local_addr() + } +} + type DropshotServer = dropshot::HttpServer; /// Packages up a [`Nexus`], running both external and internal HTTP API servers @@ -122,6 +166,7 @@ impl Server { async fn start(internal: InternalServer) -> Result { let apictx = internal.apictx; let http_server_internal = internal.http_server_internal; + let http_server_lockstep = internal.http_server_lockstep; let log = internal.log; let config = internal.config; @@ -208,6 +253,7 @@ impl Server { http_server_external, http_server_techport_external, http_server_internal, + http_server_lockstep, producer_server, ) .await; @@ -236,11 +282,10 @@ impl nexus_test_interface::NexusServer for Server { async fn start_internal( config: &NexusConfig, log: &Logger, - ) -> Result<(InternalServer, SocketAddr), String> { + ) -> Result { let internal_server = InternalServer::start(config, &log).await?; internal_server.apictx.context.nexus.wait_for_populate().await.unwrap(); - let addr = internal_server.http_server_internal.local_addr(); - Ok((internal_server, addr)) + Ok(internal_server) } async fn stop_internal(internal_server: InternalServer) { @@ -368,6 +413,10 @@ impl nexus_test_interface::NexusServer for Server { self.apictx.context.nexus.get_internal_server_address().await.unwrap() } + async fn get_http_server_lockstep_address(&self) -> SocketAddr { + self.apictx.context.nexus.get_lockstep_server_address().await.unwrap() + } + async fn upsert_test_dataset( &self, physical_disk: PhysicalDiskPutRequest, diff --git a/nexus/src/lockstep_api/http_entrypoints.rs b/nexus/src/lockstep_api/http_entrypoints.rs new file mode 100644 index 00000000000..5d771aa5912 --- /dev/null +++ b/nexus/src/lockstep_api/http_entrypoints.rs @@ -0,0 +1,24 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Handler functions (entrypoints) for HTTP APIs internal to the control plane +//! 
whose callers are updated in lockstep with Nexus + +use crate::context::ApiContext; +use dropshot::ApiDescription; +use nexus_lockstep_api::*; + +type NexusApiDescription = ApiDescription; + +/// Returns a description of the nexus lockstep API +pub(crate) fn lockstep_api() -> NexusApiDescription { + nexus_lockstep_api_mod::api_description::() + .expect("registered API endpoints successfully") +} + +enum NexusLockstepApiImpl {} + +impl NexusLockstepApi for NexusLockstepApiImpl { + type Context = ApiContext; +} diff --git a/nexus/src/lockstep_api/mod.rs b/nexus/src/lockstep_api/mod.rs new file mode 100644 index 00000000000..7b115b70b79 --- /dev/null +++ b/nexus/src/lockstep_api/mod.rs @@ -0,0 +1,5 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +pub mod http_entrypoints; diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index b32491da301..361daece4cf 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -48,12 +48,12 @@ use std::sync::Arc; #[async_trait] pub trait NexusServer: Send + Sync + 'static { - type InternalServer: Send + Sync + 'static; + type InternalServer: InternalServer; async fn start_internal( config: &NexusConfig, log: &Logger, - ) -> Result<(Self::InternalServer, SocketAddr), String>; + ) -> Result; /// Stops the execution of a `Self::InternalServer`. /// @@ -88,6 +88,7 @@ pub trait NexusServer: Send + Sync + 'static { async fn get_http_server_external_address(&self) -> SocketAddr; async fn get_http_server_techport_address(&self) -> SocketAddr; async fn get_http_server_internal_address(&self) -> SocketAddr; + async fn get_http_server_lockstep_address(&self) -> SocketAddr; // Previously, as a dataset was created (within the sled agent), // we'd use an internal API from Nexus to record that the dataset @@ -128,3 +129,8 @@ pub trait NexusServer: Send + Sync + 'static { async fn close(self); } + +pub trait InternalServer: Send + Sync + 'static { + fn get_http_server_internal_address(&self) -> SocketAddr; + fn get_http_server_lockstep_address(&self) -> SocketAddr; +} diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 3fb226bbea0..37640ab2e8a 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -42,6 +42,7 @@ use nexus_sled_agent_shared::inventory::OmicronSledConfig; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; use nexus_sled_agent_shared::inventory::SledCpuFamily; use nexus_sled_agent_shared::recovery_silo::RecoverySiloConfig; +use nexus_test_interface::InternalServer; use nexus_test_interface::NexusServer; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintDatasetConfig; @@ -176,6 +177,7 @@ pub struct ControlPlaneTestContext { pub external_client: ClientTestContext, pub techport_client: ClientTestContext, pub internal_client: ClientTestContext, + pub lockstep_client: ClientTestContext, pub server: N, pub database: dev::db::CockroachInstance, pub database_admin: omicron_cockroach_admin::Server, @@ -377,6 +379,19 @@ impl RackInitRequestBuilder { .expect("Failed to set up DNS for GZ service"); } + // Special handling of Nexus, which has multiple SRV records for its single + // zone. 
+ fn add_nexus_to_dns( + &mut self, + zone_id: OmicronZoneUuid, + address: SocketAddrV6, + lockstep_port: u16, + ) { + self.internal_dns_config + .host_zone_nexus(zone_id, address, lockstep_port) + .expect("Failed to set up Nexus DNS"); + } + // Special handling of ClickHouse, which has multiple SRV records for its // single zone. fn add_clickhouse_to_dns( @@ -424,6 +439,7 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub external_client: Option, pub techport_client: Option, pub internal_client: Option, + pub lockstep_client: Option, pub server: Option, pub database: Option, @@ -483,6 +499,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { external_client: None, techport_client: None, internal_client: None, + lockstep_client: None, server: None, database: None, database_admin: None, @@ -828,25 +845,36 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .clone(), }; - let (nexus_internal, nexus_internal_addr) = - N::start_internal(&self.config, &log).await?; - - let address = SocketAddrV6::new( - match nexus_internal_addr.ip() { - IpAddr::V4(addr) => addr.to_ipv6_mapped(), - IpAddr::V6(addr) => addr, - }, - nexus_internal_addr.port(), - 0, - 0, - ); + let nexus_internal = N::start_internal(&self.config, &log).await?; + let nexus_internal_addr = + nexus_internal.get_http_server_internal_address(); + let internal_address = match nexus_internal_addr { + SocketAddr::V4(addr) => { + SocketAddrV6::new(addr.ip().to_ipv6_mapped(), addr.port(), 0, 0) + } + SocketAddr::V6(addr) => addr, + }; + let lockstep_address = match nexus_internal + .get_http_server_lockstep_address() + { + SocketAddr::V4(addr) => { + SocketAddrV6::new(addr.ip().to_ipv6_mapped(), addr.port(), 0, 0) + } + SocketAddr::V6(addr) => addr, + }; + assert_eq!(internal_address.ip(), lockstep_address.ip()); - self.rack_init_builder.add_service_to_dns( + self.rack_init_builder.add_nexus_to_dns( self.config.deployment.id, - address, - ServiceName::Nexus, + internal_address, + lockstep_address.port(), + ); + self.record_nexus_zone( + self.config.clone(), + internal_address, + lockstep_address.port(), + 0, ); - self.record_nexus_zone(self.config.clone(), address, 0); self.nexus_internal = Some(nexus_internal); self.nexus_internal_addr = Some(nexus_internal_addr); Ok(()) @@ -890,13 +918,24 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { nexus/examples/config-second.toml" ); }; - self.record_nexus_zone(second_nexus_config, second_internal_address, 1); + let second_lockstep_port = second_nexus_config + .deployment + .dropshot_lockstep + .bind_address + .port(); + self.record_nexus_zone( + second_nexus_config, + second_internal_address, + second_lockstep_port, + 1, + ); } fn record_nexus_zone( &mut self, config: NexusConfig, internal_address: SocketAddrV6, + lockstep_port: u16, which: usize, ) { let id = config.deployment.id; @@ -925,6 +964,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { }, external_tls: config.deployment.dropshot_external.tls, internal_address, + lockstep_port, nic: NetworkInterface { id: Uuid::new_v4(), ip: NEXUS_OPTE_IPV4_SUBNET @@ -1080,6 +1120,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { server.get_http_server_techport_address().await; let internal_server_addr = server.get_http_server_internal_address().await; + let lockstep_server_addr = + server.get_http_server_lockstep_address().await; let testctx_external = ClientTestContext::new( external_server_addr, self.logctx @@ -1098,11 +1140,18 @@ impl<'a, N: 
NexusServer> ControlPlaneTestContextBuilder<'a, N> { .log .new(o!("component" => "internal client test context")), ); + let testctx_lockstep = ClientTestContext::new( + lockstep_server_addr, + self.logctx + .log + .new(o!("component" => "lockstep client test context")), + ); self.external_dns_zone_name = Some(external_dns_zone_name); self.external_client = Some(testctx_external); self.techport_client = Some(testctx_techport); self.internal_client = Some(testctx_internal); + self.lockstep_client = Some(testctx_lockstep); self.silo_name = Some(silo_name); self.user_name = Some(user_name); self.password = Some(TEST_SUITE_PASSWORD.to_string()); @@ -1463,6 +1512,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { external_client: self.external_client.unwrap(), techport_client: self.techport_client.unwrap(), internal_client: self.internal_client.unwrap(), + lockstep_client: self.lockstep_client.unwrap(), database: self.database.unwrap(), database_admin: self.database_admin.unwrap(), clickhouse: self.clickhouse.unwrap(), diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index 70a1d79e1e9..56d174ce451 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -56,6 +56,10 @@ default_request_body_max_bytes = 1048576 bind_address = "127.0.0.1:0" default_request_body_max_bytes = 1048576 +[deployment.dropshot_lockstep] +bind_address = "127.0.0.1:0" +default_request_body_max_bytes = 1048576 + # # NOTE: for the test suite, the internal DNS address will be replaced with one # that's started by the test runner. diff --git a/nexus/types/src/deployment/execution/dns.rs b/nexus/types/src/deployment/execution/dns.rs index e1461c290fb..04a28aac921 100644 --- a/nexus/types/src/deployment/execution/dns.rs +++ b/nexus/types/src/deployment/execution/dns.rs @@ -97,8 +97,19 @@ pub fn blueprint_internal_dns_config( ) => (ServiceName::Cockroach, address), BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { internal_address, + lockstep_port, .. - }) => (ServiceName::Nexus, internal_address), + }) => { + // Add both the `nexus` service as well as the `nexus-lockstep` + // service. Continue so we don't fall through and call + // `host_zone_with_one_backend`. + dns_builder.host_zone_nexus( + zone.id, + *internal_address, + *lockstep_port, + )?; + continue 'all_zones; + } BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { address, .. diff --git a/nexus/types/src/deployment/zone_type.rs b/nexus/types/src/deployment/zone_type.rs index 79cb68fb98a..ff58c4bc915 100644 --- a/nexus/types/src/deployment/zone_type.rs +++ b/nexus/types/src/deployment/zone_type.rs @@ -305,6 +305,7 @@ impl From for OmicronZoneType { } BlueprintZoneType::Nexus(zone) => Self::Nexus { internal_address: zone.internal_address, + lockstep_port: zone.lockstep_port, external_ip: zone.external_ip.ip, nic: zone.nic, external_tls: zone.external_tls, @@ -559,6 +560,9 @@ pub mod blueprint_zone_type { pub struct Nexus { /// The address at which the internal nexus server is reachable. pub internal_address: SocketAddrV6, + /// The port at which the lockstep server is reachable. This shares the + /// same IP address with `internal_address`. + pub lockstep_port: u16, /// The address at which the external nexus server is reachable. pub external_ip: OmicronZoneExternalFloatingIp, /// The service vNIC providing external connectivity using OPTE. 
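For reference, a minimal sketch of how the new `DnsConfigBuilder::host_zone_nexus` shorthand (added above) is intended to be called when wiring up a Nexus zone. The wrapper function, the commented-out import paths, and the concrete address values below are illustrative assumptions; only `host_zone_nexus`, `NEXUS_LOCKSTEP_PORT`, and the two SRV names come from this change.

use std::net::{Ipv6Addr, SocketAddrV6};

// Assumed to be in scope from their respective crates; exact paths may differ:
// use internal_dns_types::config::DnsConfigBuilder;
// use omicron_uuid_kinds::OmicronZoneUuid;

fn register_nexus_dns(
    dns: &mut DnsConfigBuilder,
    zone_id: OmicronZoneUuid,
) -> anyhow::Result<()> {
    // The internal API listens on the zone's underlay IP (illustrative values
    // here); the lockstep API shares that IP and uses NEXUS_LOCKSTEP_PORT.
    let internal_address = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 12221, 0, 0);

    // One call adds the zone's host record plus SRV backends for both
    // `_nexus._tcp` (internal API port) and `_nexus-lockstep._tcp`
    // (lockstep port), and fails only if the zone was already added.
    dns.host_zone_nexus(
        zone_id,
        internal_address,
        omicron_common::address::NEXUS_LOCKSTEP_PORT,
    )
}

This is the same pattern `blueprint_internal_dns_config` follows in the hunk above: Nexus zones no longer fall through to `host_zone_with_one_backend`, because a single zone now backs two services.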
diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 2b1e047683a..2764e19fca4 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -3665,6 +3665,12 @@ "description": "The address at which the internal nexus server is reachable.", "type": "string" }, + "lockstep_port": { + "description": "The port at which the lockstep server is reachable. This shares the same IP address with `internal_address`.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, "nexus_generation": { "description": "Generation number for this Nexus zone. This is used to coordinate handoff between old and new Nexus instances during updates. See RFD 588.", "allOf": [ @@ -3693,6 +3699,7 @@ "external_ip", "external_tls", "internal_address", + "lockstep_port", "nexus_generation", "nic", "type" @@ -6461,6 +6468,13 @@ "description": "The address at which the internal nexus server is reachable.", "type": "string" }, + "lockstep_port": { + "description": "The port at which the internal lockstep server is reachable. This shares the same IP address with `internal_address`.", + "default": 12232, + "type": "integer", + "format": "uint16", + "minimum": 0 + }, "nic": { "description": "The service vNIC providing external connectivity using OPTE.", "allOf": [ diff --git a/openapi/nexus-lockstep.json b/openapi/nexus-lockstep.json new file mode 100644 index 00000000000..5acf17a057f --- /dev/null +++ b/openapi/nexus-lockstep.json @@ -0,0 +1,96 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "Nexus lockstep API", + "description": "Nexus lockstep internal API", + "contact": { + "url": "https://oxide.computer", + "email": "api@oxide.computer" + }, + "version": "0.0.1" + }, + "paths": { + "/v1/ping": { + "get": { + "summary": "Ping API", + "description": "Always responds with Ok if it responds at all.", + "operationId": "ping", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ping" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + } + }, + "components": { + "schemas": { + "Error": { + "description": "Error information from a response.", + "type": "object", + "properties": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "request_id": { + "type": "string" + } + }, + "required": [ + "message", + "request_id" + ] + }, + "Ping": { + "type": "object", + "properties": { + "status": { + "description": "Whether the external API is reachable. 
Will always be Ok if the endpoint returns anything at all.", + "allOf": [ + { + "$ref": "#/components/schemas/PingStatus" + } + ] + } + }, + "required": [ + "status" + ] + }, + "PingStatus": { + "type": "string", + "enum": [ + "ok" + ] + } + }, + "responses": { + "Error": { + "description": "Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } +} diff --git a/openapi/sled-agent/sled-agent-4.0.0-fd6727.json b/openapi/sled-agent/sled-agent-4.0.0-fd6727.json new file mode 100644 index 00000000000..c0dfc31d59e --- /dev/null +++ b/openapi/sled-agent/sled-agent-4.0.0-fd6727.json @@ -0,0 +1,8383 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "Oxide Sled Agent API", + "description": "API for interacting with individual sleds", + "contact": { + "url": "https://oxide.computer", + "email": "api@oxide.computer" + }, + "version": "4.0.0" + }, + "paths": { + "/artifacts": { + "get": { + "operationId": "artifact_list", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArtifactListResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/artifacts/{sha256}": { + "put": { + "operationId": "artifact_put", + "parameters": [ + { + "in": "path", + "name": "sha256", + "required": true, + "schema": { + "type": "string", + "format": "hex string (32 bytes)" + } + }, + { + "in": "query", + "name": "generation", + "required": true, + "schema": { + "$ref": "#/components/schemas/Generation" + } + } + ], + "requestBody": { + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArtifactPutResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/artifacts/{sha256}/copy-from-depot": { + "post": { + "operationId": "artifact_copy_from_depot", + "parameters": [ + { + "in": "path", + "name": "sha256", + "required": true, + "schema": { + "type": "string", + "format": "hex string (32 bytes)" + } + }, + { + "in": "query", + "name": "generation", + "required": true, + "schema": { + "$ref": "#/components/schemas/Generation" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArtifactCopyFromDepotBody" + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "successfully enqueued operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArtifactCopyFromDepotResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/artifacts-config": { + "get": { + "operationId": "artifact_config_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArtifactConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "operationId": "artifact_config_put", + "requestBody": { + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/ArtifactConfig" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/bootstore/status": { + "get": { + "summary": "Get the internal state of the local bootstore node", + "operationId": "bootstore_status", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BootstoreStatus" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/debug/switch-zone-policy": { + "get": { + "summary": "A debugging endpoint only used by `omdb` that allows us to test", + "description": "restarting the switch zone without restarting sled-agent. See for context.", + "operationId": "debug_operator_switch_zone_policy_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OperatorSwitchZonePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "summary": "A debugging endpoint only used by `omdb` that allows us to test", + "description": "restarting the switch zone without restarting sled-agent. See for context.\n\nSetting the switch zone policy is asynchronous and inherently racy with the standard process of starting the switch zone. If the switch zone is in the process of being started or stopped when this policy is changed, the new policy may not take effect until that transition completes.", + "operationId": "debug_operator_switch_zone_policy_put", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OperatorSwitchZonePolicy" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/disks/{disk_id}": { + "put": { + "operationId": "disk_put", + "parameters": [ + { + "in": "path", + "name": "disk_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskEnsureBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskRuntimeState" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/eip-gateways": { + "put": { + "summary": "Update per-NIC IP address <-> internet gateway mappings.", + "operationId": "set_eip_gateways", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ExternalIpGatewayMap" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/inventory": { + "get": { + "summary": "Fetch basic information about this sled", + "operationId": "inventory", + "responses": { + 
"200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Inventory" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/network-bootstore-config": { + "get": { + "summary": "This API endpoint is only reading the local sled agent's view of the", + "description": "bootstore. The boostore is a distributed data store that is eventually consistent. Reads from individual nodes may not represent the latest state.", + "operationId": "read_network_bootstore_config_cache", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EarlyNetworkConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "operationId": "write_network_bootstore_config", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EarlyNetworkConfig" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/omicron-config": { + "put": { + "operationId": "omicron_config_put", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmicronSledConfig" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sled-identifiers": { + "get": { + "summary": "Fetch sled identifiers", + "operationId": "sled_identifiers", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledIdentifiers" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sled-role": { + "get": { + "operationId": "sled_role_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledRole" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sleds": { + "put": { + "summary": "Add a sled to a rack that was already initialized via RSS", + "operationId": "sled_add", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AddSledRequest" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/dladm-info": { + "get": { + "operationId": "support_dladm_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SledDiagnosticsQueryOutput", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } 
+ } + }, + "/support/health-check": { + "get": { + "operationId": "support_health_check", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SledDiagnosticsQueryOutput", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/ipadm-info": { + "get": { + "operationId": "support_ipadm_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SledDiagnosticsQueryOutput", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/logs/download/{zone}": { + "get": { + "summary": "This endpoint returns a zip file of a zone's logs organized by service.", + "operationId": "support_logs_download", + "parameters": [ + { + "in": "path", + "name": "zone", + "description": "The zone for which one would like to collect logs for", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "query", + "name": "max_rotated", + "description": "The max number of rotated logs to include in the final support bundle", + "required": true, + "schema": { + "type": "integer", + "format": "uint", + "minimum": 0 + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/support/logs/zones": { + "get": { + "summary": "This endpoint returns a list of known zones on a sled that have service", + "description": "logs that can be collected into a support bundle.", + "operationId": "support_logs", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_String", + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/nvmeadm-info": { + "get": { + "operationId": "support_nvmeadm_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/pargs-info": { + "get": { + "operationId": "support_pargs_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SledDiagnosticsQueryOutput", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/pfiles-info": { + "get": { + "operationId": "support_pfiles_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SledDiagnosticsQueryOutput", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + } + }, 
+ "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/pstack-info": { + "get": { + "operationId": "support_pstack_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SledDiagnosticsQueryOutput", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/zfs-info": { + "get": { + "operationId": "support_zfs_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/zoneadm-info": { + "get": { + "operationId": "support_zoneadm_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support/zpool-info": { + "get": { + "operationId": "support_zpool_info", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledDiagnosticsQueryOutput" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}": { + "get": { + "summary": "List all support bundles within a particular dataset", + "operationId": "support_bundle_list", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_SupportBundleMetadata", + "type": "array", + "items": { + "$ref": "#/components/schemas/SupportBundleMetadata" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}": { + "post": { + "summary": "Starts creation of a support bundle within a particular dataset", + "description": "Callers should transfer chunks of the bundle with \"support_bundle_transfer\", and then call \"support_bundle_finalize\" once the bundle has finished transferring.\n\nIf a support bundle was previously created without being finalized successfully, this endpoint will reset the state.\n\nIf a support bundle was previously created and finalized successfully, this endpoint will return metadata indicating that it already exists.", + "operationId": "support_bundle_start_creation", + "parameters": [ + { + "in": "path", + "name": 
"dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleMetadata" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "summary": "Delete a support bundle from a particular dataset", + "operationId": "support_bundle_delete", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}/download": { + "get": { + "summary": "Fetch a support bundle from a particular dataset", + "operationId": "support_bundle_download", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "head": { + "summary": "Fetch metadata about a support bundle from a particular dataset", + "operationId": "support_bundle_head", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + 
"required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}/download/{file}": { + "get": { + "summary": "Fetch a file within a support bundle from a particular dataset", + "operationId": "support_bundle_download_file", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "file", + "description": "The path of the file within the support bundle to query", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "head": { + "summary": "Fetch metadata about a file within a support bundle from a particular dataset", + "operationId": "support_bundle_head_file", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "file", + "description": "The path of the file within the support bundle to query", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}/finalize": { + "post": { + "summary": "Finalizes the creation of a support bundle", + "description": "If the requested hash matched the bundle, the bundle is created. 
Otherwise, an error is returned.", + "operationId": "support_bundle_finalize", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + }, + { + "in": "query", + "name": "hash", + "required": true, + "schema": { + "type": "string", + "format": "hex string (32 bytes)" + } + } + ], + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleMetadata" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}/index": { + "get": { + "summary": "Fetch the index (list of files within a support bundle)", + "operationId": "support_bundle_index", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "head": { + "summary": "Fetch metadata about the list of files within a support bundle", + "operationId": "support_bundle_head_index", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/support-bundles/{zpool_id}/{dataset_id}/{support_bundle_id}/transfer": 
{ + "put": { + "summary": "Transfers a chunk of a support bundle within a particular dataset", + "operationId": "support_bundle_transfer", + "parameters": [ + { + "in": "path", + "name": "dataset_id", + "description": "The dataset on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + }, + { + "in": "path", + "name": "support_bundle_id", + "description": "The ID of the support bundle itself", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + { + "in": "path", + "name": "zpool_id", + "description": "The zpool on which this support bundle was provisioned", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + }, + { + "in": "query", + "name": "offset", + "required": true, + "schema": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + } + ], + "requestBody": { + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleMetadata" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/switch-ports": { + "post": { + "operationId": "uplink_ensure", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPorts" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v2p": { + "get": { + "summary": "List v2p mappings present on sled", + "operationId": "list_v2p", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_VirtualNetworkInterfaceHost", + "type": "array", + "items": { + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "summary": "Create a mapping from a virtual NIC to a physical host", + "operationId": "set_v2p", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "summary": "Delete a mapping from a virtual NIC to a physical host", + "operationId": "del_v2p", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/vmms/{propolis_id}": { + "put": { + "operationId": "vmm_register", + "parameters": [ + { + "in": "path", + "name": "propolis_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + } + ], + 
"requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceEnsureBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledVmmState" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "operationId": "vmm_unregister", + "parameters": [ + { + "in": "path", + "name": "propolis_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VmmUnregisterResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/vmms/{propolis_id}/disks/{disk_id}/snapshot": { + "post": { + "summary": "Take a snapshot of a disk that is attached to an instance", + "operationId": "vmm_issue_disk_snapshot_request", + "parameters": [ + { + "in": "path", + "name": "disk_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "propolis_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VmmIssueDiskSnapshotRequestBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VmmIssueDiskSnapshotRequestResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/vmms/{propolis_id}/external-ip": { + "put": { + "operationId": "vmm_put_external_ip", + "parameters": [ + { + "in": "path", + "name": "propolis_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceExternalIpBody" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "operationId": "vmm_delete_external_ip", + "parameters": [ + { + "in": "path", + "name": "propolis_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceExternalIpBody" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/vmms/{propolis_id}/state": { + "get": { + "operationId": "vmm_get_state", + "parameters": [ + { + "in": "path", + "name": "propolis_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/SledVmmState" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "operationId": "vmm_put_state", + "parameters": [ + { + "in": "path", + "name": "propolis_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VmmPutStateBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VmmPutStateResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/vpc/{vpc_id}/firewall/rules": { + "put": { + "operationId": "vpc_firewall_rules_put", + "parameters": [ + { + "in": "path", + "name": "vpc_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcFirewallRulesEnsureBody" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/vpc-routes": { + "get": { + "summary": "Get the current versions of VPC routing rules.", + "operationId": "list_vpc_routes", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_ResolvedVpcRouteState", + "type": "array", + "items": { + "$ref": "#/components/schemas/ResolvedVpcRouteState" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "summary": "Update VPC routing rules.", + "operationId": "set_vpc_routes", + "requestBody": { + "content": { + "application/json": { + "schema": { + "title": "Array_of_ResolvedVpcRouteSet", + "type": "array", + "items": { + "$ref": "#/components/schemas/ResolvedVpcRouteSet" + } + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/zones": { + "get": { + "summary": "List the zones that are currently managed by the sled agent.", + "operationId": "zones_list", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_String", + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/zones/bundle-cleanup": { + "post": { + "summary": "Trigger a zone bundle cleanup.", + "operationId": "zone_bundle_cleanup", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Map_of_CleanupCount", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/CleanupCount" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + 
"/zones/bundle-cleanup/context": { + "get": { + "summary": "Return context used by the zone-bundle cleanup task.", + "operationId": "zone_bundle_cleanup_context", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CleanupContext" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "summary": "Update context used by the zone-bundle cleanup task.", + "operationId": "zone_bundle_cleanup_context_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CleanupContextUpdate" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/zones/bundle-cleanup/utilization": { + "get": { + "summary": "Return utilization information about all zone bundles.", + "operationId": "zone_bundle_utilization", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Map_of_BundleUtilization", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BundleUtilization" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/zones/bundles": { + "get": { + "summary": "List all zone bundles that exist, even for now-deleted zones.", + "operationId": "zone_bundle_list_all", + "parameters": [ + { + "in": "query", + "name": "filter", + "description": "An optional substring used to filter zone bundles.", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_ZoneBundleMetadata", + "type": "array", + "items": { + "$ref": "#/components/schemas/ZoneBundleMetadata" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/zones/bundles/{zone_name}": { + "get": { + "summary": "List the zone bundles that are available for a running zone.", + "operationId": "zone_bundle_list", + "parameters": [ + { + "in": "path", + "name": "zone_name", + "description": "The name of the zone.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_ZoneBundleMetadata", + "type": "array", + "items": { + "$ref": "#/components/schemas/ZoneBundleMetadata" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/zones/bundles/{zone_name}/{bundle_id}": { + "get": { + "summary": "Fetch the binary content of a single zone bundle.", + "operationId": "zone_bundle_get", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "The ID for this bundle itself.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "zone_name", + "description": "The name of the zone this bundle is derived from.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { 
+ "200": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "summary": "Delete a zone bundle.", + "operationId": "zone_bundle_delete", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "The ID for this bundle itself.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "zone_name", + "description": "The name of the zone this bundle is derived from.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + } + }, + "components": { + "schemas": { + "AddSledRequest": { + "description": "A request to Add a given sled after rack initialization has occurred", + "type": "object", + "properties": { + "sled_id": { + "$ref": "#/components/schemas/BaseboardId" + }, + "start_request": { + "$ref": "#/components/schemas/StartSledAgentRequest" + } + }, + "required": [ + "sled_id", + "start_request" + ] + }, + "ArtifactConfig": { + "type": "object", + "properties": { + "artifacts": { + "type": "array", + "items": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "uniqueItems": true + }, + "generation": { + "$ref": "#/components/schemas/Generation" + } + }, + "required": [ + "artifacts", + "generation" + ] + }, + "ArtifactCopyFromDepotBody": { + "type": "object", + "properties": { + "depot_base_url": { + "type": "string" + } + }, + "required": [ + "depot_base_url" + ] + }, + "ArtifactCopyFromDepotResponse": { + "type": "object" + }, + "ArtifactListResponse": { + "type": "object", + "properties": { + "generation": { + "$ref": "#/components/schemas/Generation" + }, + "list": { + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "uint", + "minimum": 0 + } + } + }, + "required": [ + "generation", + "list" + ] + }, + "ArtifactPutResponse": { + "type": "object", + "properties": { + "datasets": { + "description": "The number of valid M.2 artifact datasets we found on the sled. There is typically one of these datasets for each functional M.2.", + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "successful_writes": { + "description": "The number of valid writes to the M.2 artifact datasets. 
This should be less than or equal to the number of artifact datasets.", + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "datasets", + "successful_writes" + ] + }, + "Baseboard": { + "description": "Describes properties that should uniquely identify a Gimlet.", + "oneOf": [ + { + "type": "object", + "properties": { + "identifier": { + "type": "string" + }, + "model": { + "type": "string" + }, + "revision": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "gimlet" + ] + } + }, + "required": [ + "identifier", + "model", + "revision", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "unknown" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "identifier": { + "type": "string" + }, + "model": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "pc" + ] + } + }, + "required": [ + "identifier", + "model", + "type" + ] + } + ] + }, + "BaseboardId": { + "description": "A representation of a Baseboard ID as used in the inventory subsystem This type is essentially the same as a `Baseboard` except it doesn't have a revision or HW type (Gimlet, PC, Unknown).", + "type": "object", + "properties": { + "part_number": { + "description": "Oxide Part Number", + "type": "string" + }, + "serial_number": { + "description": "Serial number (unique for a given part number)", + "type": "string" + } + }, + "required": [ + "part_number", + "serial_number" + ] + }, + "BfdMode": { + "description": "BFD connection mode.", + "type": "string", + "enum": [ + "single_hop", + "multi_hop" + ] + }, + "BfdPeerConfig": { + "type": "object", + "properties": { + "detection_threshold": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "local": { + "nullable": true, + "type": "string", + "format": "ip" + }, + "mode": { + "$ref": "#/components/schemas/BfdMode" + }, + "remote": { + "type": "string", + "format": "ip" + }, + "required_rx": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "switch": { + "$ref": "#/components/schemas/SwitchLocation" + } + }, + "required": [ + "detection_threshold", + "mode", + "remote", + "required_rx", + "switch" + ] + }, + "BgpConfig": { + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number for the BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "checker": { + "nullable": true, + "description": "Checker to apply to incoming messages.", + "default": null, + "type": "string" + }, + "originate": { + "description": "The set of prefixes for the BGP router to originate.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4Net" + } + }, + "shaper": { + "nullable": true, + "description": "Shaper to apply to outgoing messages.", + "default": null, + "type": "string" + } + }, + "required": [ + "asn", + "originate" + ] + }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "addr": { + "description": "Address of the peer.", + "type": "string", + "format": "ipv4" + }, + "allowed_export": { + "description": "Define export policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Define import policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + 
"asn": { + "description": "The autonomous system number of the router the peer belongs to.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "default": [], + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "connect_retry": { + "nullable": true, + "description": "The interval in seconds between peer connection retry attempts.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "delay_open": { + "nullable": true, + "description": "How long to delay sending open messages to a peer. In seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "default": false, + "type": "boolean" + }, + "hold_time": { + "nullable": true, + "description": "How long to keep a session alive without a keepalive in seconds. Defaults to 6.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "idle_hold_time": { + "nullable": true, + "description": "How long to keep a peer in idle after a state machine reset in seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "keepalive": { + "nullable": true, + "description": "The interval to send keepalive messages at.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "default": null, + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "default": null, + "type": "string" + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "default": null, + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "default": null, + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "default": null, + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a BGP peer session.", + "default": null, + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "addr", + "asn", + "port" + ] + }, + "BlobStorageBackend": { + "description": "A storage backend for a disk whose initial contents are given explicitly by the specification.", + "type": "object", + "properties": { + "base64": { + "description": "The disk's initial contents, encoded as a base64 string.", + "type": "string" + }, + "readonly": { + "description": "Indicates whether the storage is read-only.", + "type": "boolean" + } + }, + "required": [ + "base64", + "readonly" + ], + "additionalProperties": false + }, + "Board": { + "description": "A VM's mainboard.", + "type": "object", + "properties": { + "chipset": { + "description": "The chipset to expose to guest software.", + "allOf": [ + { + "$ref": "#/components/schemas/Chipset" + } + ] + }, + "cpuid": { + "nullable": true, + "description": 
"The CPUID values to expose to the guest. If `None`, bhyve will derive default values from the host's CPUID values.", + "allOf": [ + { + "$ref": "#/components/schemas/Cpuid" + } + ] + }, + "cpus": { + "description": "The number of virtual logical processors attached to this VM.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "guest_hv_interface": { + "description": "The hypervisor platform to expose to the guest. The default is a bhyve-compatible interface with no additional features.\n\nFor compatibility with older versions of Propolis, this field is only serialized if it specifies a non-default interface.", + "allOf": [ + { + "$ref": "#/components/schemas/GuestHypervisorInterface" + } + ] + }, + "memory_mb": { + "description": "The amount of guest RAM attached to this VM.", + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "chipset", + "cpus", + "memory_mb" + ], + "additionalProperties": false + }, + "BootImageHeader": { + "type": "object", + "properties": { + "data_size": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "flags": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "image_name": { + "type": "string" + }, + "image_size": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "sha256": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "minItems": 32, + "maxItems": 32 + }, + "target_size": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "data_size", + "flags", + "image_name", + "image_size", + "sha256", + "target_size" + ] + }, + "BootOrderEntry": { + "description": "An entry in the boot order stored in a [`BootSettings`] component.", + "type": "object", + "properties": { + "id": { + "description": "The ID of another component in the spec that Propolis should try to boot from.\n\nCurrently, only disk device components are supported.", + "allOf": [ + { + "$ref": "#/components/schemas/SpecKey" + } + ] + } + }, + "required": [ + "id" + ] + }, + "BootPartitionContents": { + "type": "object", + "properties": { + "boot_disk": { + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/M2Slot" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/M2Slot" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + }, + "slot_a": { + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/BootPartitionDetails" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/BootPartitionDetails" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + }, + "slot_b": { + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/BootPartitionDetails" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/BootPartitionDetails" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { 
+ "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + } + }, + "required": [ + "boot_disk", + "slot_a", + "slot_b" + ] + }, + "BootPartitionDetails": { + "type": "object", + "properties": { + "artifact_hash": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "artifact_size": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "header": { + "$ref": "#/components/schemas/BootImageHeader" + } + }, + "required": [ + "artifact_hash", + "artifact_size", + "header" + ] + }, + "BootSettings": { + "description": "Settings supplied to the guest's firmware image that specify the order in which it should consider its options when selecting a device to try to boot from.", + "type": "object", + "properties": { + "order": { + "description": "An ordered list of components to attempt to boot from.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BootOrderEntry" + } + } + }, + "required": [ + "order" + ], + "additionalProperties": false + }, + "BootstoreStatus": { + "type": "object", + "properties": { + "accepted_connections": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "established_connections": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EstablishedConnection" + } + }, + "fsm_ledger_generation": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "fsm_state": { + "type": "string" + }, + "negotiating_connections": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "network_config_ledger_generation": { + "nullable": true, + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "peers": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "required": [ + "accepted_connections", + "established_connections", + "fsm_ledger_generation", + "fsm_state", + "negotiating_connections", + "peers" + ] + }, + "BundleUtilization": { + "description": "The portion of a debug dataset used for zone bundles.", + "type": "object", + "properties": { + "bytes_available": { + "description": "The total number of bytes available for zone bundles.\n\nThis is `dataset_quota` multiplied by the context's storage limit.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "bytes_used": { + "description": "Total bundle usage, in bytes.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "dataset_quota": { + "description": "The total dataset quota, in bytes.", + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "bytes_available", + "bytes_used", + "dataset_quota" + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Chipset": { + "description": "A kind of virtual chipset.", + "oneOf": [ + { + "description": "An Intel 440FX-compatible chipset.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i440_fx" + ] + }, + "value": { + "$ref": "#/components/schemas/I440Fx" + } + }, + "required": [ + "type", + "value" + ], + "additionalProperties": false + } + ] + }, + "CleanupContext": { + "description": "Context provided for the zone bundle cleanup task.", + "type": "object", + "properties": { + "period": { + "description": "The period on which automatic checks and cleanup is performed.", + "allOf": [ + { + "$ref": "#/components/schemas/CleanupPeriod" + } + ] + }, + "priority": { + "description": "The priority ordering for keeping 
old bundles.", + "allOf": [ + { + "$ref": "#/components/schemas/PriorityOrder" + } + ] + }, + "storage_limit": { + "description": "The limit on the dataset quota available for zone bundles.", + "allOf": [ + { + "$ref": "#/components/schemas/StorageLimit" + } + ] + } + }, + "required": [ + "period", + "priority", + "storage_limit" + ] + }, + "CleanupContextUpdate": { + "description": "Parameters used to update the zone bundle cleanup context.", + "type": "object", + "properties": { + "period": { + "nullable": true, + "description": "The new period on which automatic cleanups are run.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "priority": { + "nullable": true, + "description": "The priority ordering for preserving old zone bundles.", + "allOf": [ + { + "$ref": "#/components/schemas/PriorityOrder" + } + ] + }, + "storage_limit": { + "nullable": true, + "description": "The new limit on the underlying dataset quota allowed for bundles.", + "type": "integer", + "format": "uint8", + "minimum": 0 + } + } + }, + "CleanupCount": { + "description": "The count of bundles / bytes removed during a cleanup operation.", + "type": "object", + "properties": { + "bundles": { + "description": "The number of bundles removed.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "bytes": { + "description": "The number of bytes removed.", + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "bundles", + "bytes" + ] + }, + "CleanupPeriod": { + "description": "A period on which bundles are automatically cleaned up.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "ComponentV0": { + "oneOf": [ + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/VirtioDisk" + }, + "type": { + "type": "string", + "enum": [ + "virtio_disk" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/NvmeDisk" + }, + "type": { + "type": "string", + "enum": [ + "nvme_disk" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/VirtioNic" + }, + "type": { + "type": "string", + "enum": [ + "virtio_nic" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/SerialPort" + }, + "type": { + "type": "string", + "enum": [ + "serial_port" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/PciPciBridge" + }, + "type": { + "type": "string", + "enum": [ + "pci_pci_bridge" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/QemuPvpanic" + }, + "type": { + "type": "string", + "enum": [ + "qemu_pvpanic" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/BootSettings" + }, + "type": { + "type": "string", + "enum": [ + "boot_settings" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + 
"type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/SoftNpuPciPort" + }, + "type": { + "type": "string", + "enum": [ + "soft_npu_pci_port" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/SoftNpuPort" + }, + "type": { + "type": "string", + "enum": [ + "soft_npu_port" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/SoftNpuP9" + }, + "type": { + "type": "string", + "enum": [ + "soft_npu_p9" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/P9fs" + }, + "type": { + "type": "string", + "enum": [ + "p9fs" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/MigrationFailureInjector" + }, + "type": { + "type": "string", + "enum": [ + "migration_failure_injector" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/CrucibleStorageBackend" + }, + "type": { + "type": "string", + "enum": [ + "crucible_storage_backend" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/FileStorageBackend" + }, + "type": { + "type": "string", + "enum": [ + "file_storage_backend" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/BlobStorageBackend" + }, + "type": { + "type": "string", + "enum": [ + "blob_storage_backend" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/VirtioNetworkBackend" + }, + "type": { + "type": "string", + "enum": [ + "virtio_network_backend" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "component": { + "$ref": "#/components/schemas/DlpiNetworkBackend" + }, + "type": { + "type": "string", + "enum": [ + "dlpi_network_backend" + ] + } + }, + "required": [ + "component", + "type" + ], + "additionalProperties": false + } + ] + }, + "CompressionAlgorithm": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "gzip" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "level": { + "$ref": "#/components/schemas/GzipLevel" + }, + "type": { + "type": "string", + "enum": [ + "gzip_n" + ] + } + }, + "required": [ + "level", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] + } + }, + 
"required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "ConfigReconcilerInventory": { + "description": "Describes the last attempt made by the sled-agent-config-reconciler to reconcile the current sled config against the actual state of the sled.", + "type": "object", + "properties": { + "boot_partitions": { + "$ref": "#/components/schemas/BootPartitionContents" + }, + "datasets": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ConfigReconcilerInventoryResult" + } + }, + "external_disks": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ConfigReconcilerInventoryResult" + } + }, + "last_reconciled_config": { + "$ref": "#/components/schemas/OmicronSledConfig" + }, + "orphaned_datasets": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/OrphanedDataset" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/OrphanedDataset" + }, + "uniqueItems": true + }, + "remove_mupdate_override": { + "nullable": true, + "description": "The result of removing the mupdate override file on disk.\n\n`None` if `remove_mupdate_override` was not provided in the sled config.", + "allOf": [ + { + "$ref": "#/components/schemas/RemoveMupdateOverrideInventory" + } + ] + }, + "zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ConfigReconcilerInventoryResult" + } + } + }, + "required": [ + "boot_partitions", + "datasets", + "external_disks", + "last_reconciled_config", + "orphaned_datasets", + "zones" + ] + }, + "ConfigReconcilerInventoryResult": { + "oneOf": [ + { + "type": "object", + "properties": { + "result": { + "type": "string", + "enum": [ + "ok" + ] + } + }, + "required": [ + "result" + ] + }, + { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "result": { + "type": "string", + "enum": [ + "err" + ] + } + }, + "required": [ + "message", + "result" + ] + } + ] + }, + "ConfigReconcilerInventoryStatus": { + "description": "Status of the sled-agent-config-reconciler task.", + "oneOf": [ + { + "description": "The reconciler task has not yet run for the first time since sled-agent started.", + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "not_yet_run" + ] + } + }, + "required": [ + "status" + ] + }, + { + "description": "The reconciler task is actively running.", + "type": "object", + "properties": { + "config": { + "$ref": "#/components/schemas/OmicronSledConfig" + }, + "running_for": { + "$ref": "#/components/schemas/Duration" + }, + "started_at": { + "type": "string", + "format": "date-time" + }, + "status": { + "type": "string", + "enum": [ + "running" + ] + } + }, + "required": [ + "config", + "running_for", + "started_at", + "status" + ] + }, + { + "description": "The reconciler task is currently idle, but previously did complete a reconciliation attempt.\n\nThis variant does not include the `OmicronSledConfig` used in the last attempt, because that's always available via [`ConfigReconcilerInventory::last_reconciled_config`].", + "type": "object", + "properties": { + "completed_at": { + "type": "string", + "format": "date-time" + }, + 
"ran_for": { + "$ref": "#/components/schemas/Duration" + }, + "status": { + "type": "string", + "enum": [ + "idle" + ] + } + }, + "required": [ + "completed_at", + "ran_for", + "status" + ] + } + ] + }, + "Cpuid": { + "description": "A set of CPUID values to expose to a guest.", + "type": "object", + "properties": { + "entries": { + "description": "A list of CPUID leaves/subleaves and their associated values.\n\nPropolis servers require that each entry's `leaf` be unique and that it falls in either the \"standard\" (0 to 0xFFFF) or \"extended\" (0x8000_0000 to 0x8000_FFFF) function ranges, since these are the only valid input ranges currently defined by Intel and AMD. See the Intel 64 and IA-32 Architectures Software Developer's Manual (June 2024) Table 3-17 and the AMD64 Architecture Programmer's Manual (March 2024) Volume 3's documentation of the CPUID instruction.", + "type": "array", + "items": { + "$ref": "#/components/schemas/CpuidEntry" + } + }, + "vendor": { + "description": "The CPU vendor to emulate.\n\nCPUID leaves in the extended range (0x8000_0000 to 0x8000_FFFF) have vendor-defined semantics. Propolis uses this value to determine these semantics when deciding whether it needs to specialize the supplied template values for these leaves.", + "allOf": [ + { + "$ref": "#/components/schemas/CpuidVendor" + } + ] + } + }, + "required": [ + "entries", + "vendor" + ], + "additionalProperties": false + }, + "CpuidEntry": { + "description": "A full description of a CPUID leaf/subleaf and the values it produces.", + "type": "object", + "properties": { + "eax": { + "description": "The value to return in eax.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "ebx": { + "description": "The value to return in ebx.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "ecx": { + "description": "The value to return in ecx.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "edx": { + "description": "The value to return in edx.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "leaf": { + "description": "The leaf (function) number for this entry.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "subleaf": { + "nullable": true, + "description": "The subleaf (index) number for this entry, if it uses subleaves.", + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "eax", + "ebx", + "ecx", + "edx", + "leaf" + ], + "additionalProperties": false + }, + "CpuidVendor": { + "description": "A CPU vendor to use when interpreting the meanings of CPUID leaves in the extended ID range (0x80000000 to 0x8000FFFF).", + "type": "string", + "enum": [ + "amd", + "intel" + ] + }, + "CrucibleStorageBackend": { + "description": "A Crucible storage backend.", + "type": "object", + "properties": { + "readonly": { + "description": "Indicates whether the storage is read-only.", + "type": "boolean" + }, + "request_json": { + "description": "A serialized `[crucible_client_types::VolumeConstructionRequest]`. 
This is stored in serialized form so that breaking changes to the definition of a `VolumeConstructionRequest` do not inadvertently break instance spec deserialization.\n\nWhen using a spec to initialize a new instance, the spec author must ensure this request is well-formed and can be deserialized by the version of `crucible_client_types` used by the target Propolis.", + "type": "string" + } + }, + "required": [ + "readonly", + "request_json" + ], + "additionalProperties": false + }, + "DatasetConfig": { + "description": "Configuration information necessary to request a single dataset.\n\nThese datasets are tracked directly by Nexus.", + "type": "object", + "properties": { + "compression": { + "description": "The compression mode to be used by the dataset", + "allOf": [ + { + "$ref": "#/components/schemas/CompressionAlgorithm" + } + ] + }, + "id": { + "description": "The UUID of the dataset being requested", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + ] + }, + "name": { + "description": "The dataset's name", + "allOf": [ + { + "$ref": "#/components/schemas/DatasetName" + } + ] + }, + "quota": { + "nullable": true, + "description": "The upper bound on the amount of storage used by this dataset", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "reservation": { + "nullable": true, + "description": "The lower bound on the amount of storage usable by this dataset", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + "compression", + "id", + "name" + ] + }, + "DatasetKind": { + "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" + }, + "DatasetName": { + "type": "object", + "properties": { + "kind": { + "$ref": "#/components/schemas/DatasetKind" + }, + "pool_name": { + "$ref": "#/components/schemas/ZpoolName" + } + }, + "required": [ + "kind", + "pool_name" + ] + }, + "DhcpConfig": { + "description": "DHCP configuration for a port\n\nNot present here: Hostname (DHCPv4 option 12; used in DHCPv6 option 39); we use `InstanceRuntimeState::hostname` for this value.", + "type": "object", + "properties": { + "dns_servers": { + "description": "DNS servers to send to the instance\n\n(DHCPv4 option 6; DHCPv6 option 23)", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "host_domain": { + "nullable": true, + "description": "DNS zone this instance's hostname belongs to (e.g. 
the `project.example` part of `instance1.project.example`)\n\n(DHCPv4 option 15; used in DHCPv6 option 39)", + "type": "string" + }, + "search_domains": { + "description": "DNS search domains\n\n(DHCPv4 option 119; DHCPv6 option 24)", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "dns_servers", + "search_domains" + ] + }, + "DiskEnsureBody": { + "description": "Sent to a sled agent to establish the runtime state of a Disk", + "type": "object", + "properties": { + "initial_runtime": { + "description": "Last runtime state of the Disk known to Nexus (used if the agent has never seen this Disk before).", + "allOf": [ + { + "$ref": "#/components/schemas/DiskRuntimeState" + } + ] + }, + "target": { + "description": "requested runtime state of the Disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskStateRequested" + } + ] + } + }, + "required": [ + "initial_runtime", + "target" + ] + }, + "DiskIdentity": { + "description": "Uniquely identifies a disk.", + "type": "object", + "properties": { + "model": { + "type": "string" + }, + "serial": { + "type": "string" + }, + "vendor": { + "type": "string" + } + }, + "required": [ + "model", + "serial", + "vendor" + ] + }, + "DiskRuntimeState": { + "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", + "type": "object", + "properties": { + "disk_state": { + "description": "runtime state of the Disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskState" + } + ] + }, + "gen": { + "description": "generation number for this state", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "time_updated": { + "description": "timestamp for this information", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "disk_state", + "gen", + "time_updated" + ] + }, + "DiskState": { + "description": "State of a Disk", + "oneOf": [ + { + "description": "Disk is being initialized", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "creating" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is ready but detached from any Instance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "detached" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is ready to receive blocks from an external source", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "import_ready" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is importing blocks from a URL", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_url" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is importing blocks from bulk writes", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_bulk_writes" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is being finalized to state Detached", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "finalizing" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is undergoing maintenance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "maintenance" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is being attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": 
"string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attaching" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk is attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attached" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk is being detached from the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "detaching" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk has been destroyed", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "destroyed" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is unavailable", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "faulted" + ] + } + }, + "required": [ + "state" + ] + } + ] + }, + "DiskStateRequested": { + "description": "Used to request a Disk state change", + "oneOf": [ + { + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "detached" + ] + } + }, + "required": [ + "state" + ] + }, + { + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attached" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "destroyed" + ] + } + }, + "required": [ + "state" + ] + }, + { + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "faulted" + ] + } + }, + "required": [ + "state" + ] + } + ] + }, + "DiskVariant": { + "type": "string", + "enum": [ + "U2", + "M2" + ] + }, + "DlpiNetworkBackend": { + "description": "A network backend associated with a DLPI VNIC on the host.", + "type": "object", + "properties": { + "vnic_name": { + "description": "The name of the VNIC to use as a backend.", + "type": "string" + } + }, + "required": [ + "vnic_name" + ], + "additionalProperties": false + }, + "Duration": { + "type": "object", + "properties": { + "nanos": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "secs": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "nanos", + "secs" + ] + }, + "EarlyNetworkConfig": { + "description": "Network configuration required to bring up the control plane\n\nThe fields in this structure are those from [`crate::rack_init::RackInitializeRequest`] necessary for use beyond RSS. This is just for the initial rack configuration and cold boot purposes. Updates come from Nexus.", + "type": "object", + "properties": { + "body": { + "$ref": "#/components/schemas/EarlyNetworkConfigBody" + }, + "generation": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "schema_version": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "body", + "generation", + "schema_version" + ] + }, + "EarlyNetworkConfigBody": { + "description": "This is the actual configuration of EarlyNetworking.\n\nWe nest it below the \"header\" of `generation` and `schema_version` so that we can perform partial deserialization of `EarlyNetworkConfig` to only read the header and defer deserialization of the body once we know the schema version. 
This is possible via the use of [`serde_json::value::RawValue`] in future (post-v1) deserialization paths.", + "type": "object", + "properties": { + "ntp_servers": { + "description": "The external NTP server addresses.", + "type": "array", + "items": { + "type": "string" + } + }, + "rack_network_config": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/RackNetworkConfigV2" + } + ] + } + }, + "required": [ + "ntp_servers" + ] + }, + "Error": { + "description": "Error information from a response.", + "type": "object", + "properties": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "request_id": { + "type": "string" + } + }, + "required": [ + "message", + "request_id" + ] + }, + "EstablishedConnection": { + "type": "object", + "properties": { + "addr": { + "type": "string" + }, + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + } + }, + "required": [ + "addr", + "baseboard" + ] + }, + "ExternalIpGatewayMap": { + "description": "Per-NIC mappings from external IP addresses to the Internet Gateways which can choose them as a source.", + "type": "object", + "properties": { + "mappings": { + "type": "object", + "additionalProperties": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string", + "format": "uuid" + }, + "uniqueItems": true + } + } + } + }, + "required": [ + "mappings" + ] + }, + "FileStorageBackend": { + "description": "A storage backend backed by a file in the host system's file system.", + "type": "object", + "properties": { + "block_size": { + "description": "Block size of the backend", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "path": { + "description": "A path to a file that backs a disk.", + "type": "string" + }, + "readonly": { + "description": "Indicates whether the storage is read-only.", + "type": "boolean" + }, + "workers": { + "nullable": true, + "description": "Optional worker threads for the file backend, exposed for testing only.", + "type": "integer", + "format": "uint", + "minimum": 1 + } + }, + "required": [ + "block_size", + "path", + "readonly" + ], + "additionalProperties": false + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "GuestHypervisorInterface": { + "description": "A hypervisor interface to expose to the guest.", + "oneOf": [ + { + "description": "Expose a bhyve-like interface (\"bhyve bhyve \" as the hypervisor ID in leaf 0x4000_0000 and no additional leaves or features).", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "bhyve" + ] + } + }, + "required": [ + "type" + ], + "additionalProperties": false + }, + { + "description": "Expose a Hyper-V-compatible hypervisor interface with the supplied features enabled.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "hyper_v" + ] + }, + "value": { + "type": "object", + "properties": { + "features": { + "type": "array", + "items": { + "$ref": "#/components/schemas/HyperVFeatureFlag" + }, + "uniqueItems": true + } + }, + "required": [ + "features" + ], + "additionalProperties": false + } + }, + "required": [ + "type", + "value" + ], + "additionalProperties": false + } + ] + }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "HostIdentifier": { + "description": "A `HostIdentifier` represents either an IP host or network (v4 or v6), 
or an entire VPC (identified by its VNI). It is used in firewall rule host filters.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "$ref": "#/components/schemas/IpNet" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "HostPhase2DesiredContents": { + "description": "Describes the desired contents of a host phase 2 slot (i.e., the boot partition on one of the internal M.2 drives).", + "oneOf": [ + { + "description": "Do not change the current contents.\n\nWe use this value when we've detected a sled has been mupdated (and we don't want to overwrite phase 2 images until we understand how to recover from that mupdate) and as the default value when reading an [`OmicronSledConfig`] that was ledgered before this concept existed.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "current_contents" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Set the phase 2 slot to the given artifact.\n\nThe artifact will come from an unpacked and distributed TUF repo.", + "type": "object", + "properties": { + "hash": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "type": { + "type": "string", + "enum": [ + "artifact" + ] + } + }, + "required": [ + "hash", + "type" + ] + } + ] + }, + "HostPhase2DesiredSlots": { + "description": "Describes the desired contents for both host phase 2 slots.", + "type": "object", + "properties": { + "slot_a": { + "$ref": "#/components/schemas/HostPhase2DesiredContents" + }, + "slot_b": { + "$ref": "#/components/schemas/HostPhase2DesiredContents" + } + }, + "required": [ + "slot_a", + "slot_b" + ] + }, + "HostPortConfig": { + "type": "object", + "properties": { + "addrs": { + "description": "IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport (must be in infra_ip pool). May also include an optional VLAN ID.", + "type": "array", + "items": { + "$ref": "#/components/schemas/UplinkAddressConfig" + } + }, + "lldp": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/LldpPortConfig" + } + ] + }, + "port": { + "description": "Switchport to use for external connectivity", + "type": "string" + }, + "tx_eq": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/TxEqConfig" + } + ] + } + }, + "required": [ + "addrs", + "port" + ] + }, + "Hostname": { + "title": "An RFC-1035-compliant hostname", + "description": "A hostname identifies a host on a network, and is usually a dot-delimited sequence of labels, where each label contains only letters, digits, or the hyphen. See RFCs 1035 and 952 for more details.", + "type": "string", + "pattern": "^([a-zA-Z0-9]+[a-zA-Z0-9\\-]*(? 
for background.", + "oneOf": [ + { + "description": "Start the switch zone if a switch is present.\n\nThis is the default policy.", + "type": "object", + "properties": { + "policy": { + "type": "string", + "enum": [ + "start_if_switch_present" + ] + } + }, + "required": [ + "policy" + ] + }, + { + "description": "Even if a switch zone is present, stop the switch zone.", + "type": "object", + "properties": { + "policy": { + "type": "string", + "enum": [ + "stop_despite_switch_presence" + ] + } + }, + "required": [ + "policy" + ] + } + ] + }, + "OrphanedDataset": { + "type": "object", + "properties": { + "available": { + "$ref": "#/components/schemas/ByteCount" + }, + "id": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + } + ] + }, + "mounted": { + "type": "boolean" + }, + "name": { + "$ref": "#/components/schemas/DatasetName" + }, + "reason": { + "type": "string" + }, + "used": { + "$ref": "#/components/schemas/ByteCount" + } + }, + "required": [ + "available", + "mounted", + "name", + "reason", + "used" + ] + }, + "P9fs": { + "description": "Describes a filesystem to expose through a P9 device.\n\nThis is only supported by Propolis servers compiled with the `falcon` feature.", + "type": "object", + "properties": { + "chunk_size": { + "description": "The chunk size to use in the 9P protocol. Vanilla Helios images should use 8192. Falcon Helios base images and Linux can use up to 65536.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "pci_path": { + "description": "The PCI path at which to attach the guest to this P9 filesystem.", + "allOf": [ + { + "$ref": "#/components/schemas/PciPath" + } + ] + }, + "source": { + "description": "The host source path to mount into the guest.", + "type": "string" + }, + "target": { + "description": "The 9P target filesystem tag.", + "type": "string" + } + }, + "required": [ + "chunk_size", + "pci_path", + "source", + "target" + ], + "additionalProperties": false + }, + "PciPath": { + "description": "A PCI bus/device/function tuple.", + "type": "object", + "properties": { + "bus": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "device": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "function": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "bus", + "device", + "function" + ] + }, + "PciPciBridge": { + "description": "A PCI-PCI bridge.", + "type": "object", + "properties": { + "downstream_bus": { + "description": "The logical bus number of this bridge's downstream bus. 
Other devices may use this bus number in their PCI paths to indicate they should be attached to this bridge's bus.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "pci_path": { + "description": "The PCI path at which to attach this bridge.", + "allOf": [ + { + "$ref": "#/components/schemas/PciPath" + } + ] + } + }, + "required": [ + "downstream_bus", + "pci_path" + ], + "additionalProperties": false + }, + "PortConfigV2": { + "type": "object", + "properties": { + "addresses": { + "description": "This port's addresses and optional vlan IDs", + "type": "array", + "items": { + "$ref": "#/components/schemas/UplinkAddressConfig" + } + }, + "autoneg": { + "description": "Whether or not to set autonegotiation", + "default": false, + "type": "boolean" + }, + "bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } + }, + "lldp": { + "nullable": true, + "description": "LLDP configuration for this port", + "allOf": [ + { + "$ref": "#/components/schemas/LldpPortConfig" + } + ] + }, + "port": { + "description": "Name of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + }, + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + }, + "tx_eq": { + "nullable": true, + "description": "TX-EQ configuration for this port", + "allOf": [ + { + "$ref": "#/components/schemas/TxEqConfig" + } + ] + }, + "uplink_port_fec": { + "nullable": true, + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/components/schemas/PortFec" + } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/components/schemas/PortSpeed" + } + ] + } + }, + "required": [ + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_speed" + ] + }, + "PortFec": { + "description": "Switchport FEC options", + "type": "string", + "enum": [ + "firecode", + "none", + "rs" + ] + }, + "PortSpeed": { + "description": "Switchport Speed options", + "type": "string", + "enum": [ + "speed0_g", + "speed1_g", + "speed10_g", + "speed25_g", + "speed40_g", + "speed50_g", + "speed100_g", + "speed200_g", + "speed400_g" + ] + }, + "PriorityDimension": { + "description": "A dimension along which bundles can be sorted, to determine priority.", + "oneOf": [ + { + "description": "Sorting by time, with older bundles having lower priority.", + "type": "string", + "enum": [ + "time" + ] + }, + { + "description": "Sorting by the cause for creating the bundle.", + "type": "string", + "enum": [ + "cause" + ] + } + ] + }, + "PriorityOrder": { + "description": "The priority order for bundles during cleanup.\n\nBundles are sorted along the dimensions in [`PriorityDimension`], with each dimension appearing exactly once. During cleanup, lesser-priority bundles are pruned first, to maintain the dataset quota. 
Note that bundles are sorted by each dimension in the order in which they appear, with each dimension having higher priority than the next.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PriorityDimension" + }, + "minItems": 2, + "maxItems": 2 + }, + "QemuPvpanic": { + "type": "object", + "properties": { + "enable_isa": { + "description": "Enable the QEMU PVPANIC ISA bus device (I/O port 0x505).", + "type": "boolean" + } + }, + "required": [ + "enable_isa" + ], + "additionalProperties": false + }, + "RackNetworkConfigV2": { + "description": "Initial network configuration", + "type": "object", + "properties": { + "bfd": { + "description": "BFD configuration for connecting the rack to external networks", + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/BfdPeerConfig" + } + }, + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpConfig" + } + }, + "infra_ip_first": { + "description": "First ip address to be used for configuring network infrastructure", + "type": "string", + "format": "ipv4" + }, + "infra_ip_last": { + "description": "Last ip address to be used for configuring network infrastructure", + "type": "string", + "format": "ipv4" + }, + "ports": { + "description": "Uplinks for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/PortConfigV2" + } + }, + "rack_subnet": { + "$ref": "#/components/schemas/Ipv6Net" + } + }, + "required": [ + "bgp", + "infra_ip_first", + "infra_ip_last", + "ports", + "rack_subnet" + ] + }, + "RemoveMupdateOverrideBootSuccessInventory": { + "description": "Status of removing the mupdate override on the boot disk.", + "oneOf": [ + { + "description": "The mupdate override was successfully removed.", + "type": "string", + "enum": [ + "removed" + ] + }, + { + "description": "No mupdate override was found.\n\nThis is considered a success for idempotency reasons.", + "type": "string", + "enum": [ + "no_override" + ] + } + ] + }, + "RemoveMupdateOverrideInventory": { + "description": "Status of removing the mupdate override in the inventory.", + "type": "object", + "properties": { + "boot_disk_result": { + "description": "The result of removing the mupdate override on the boot disk.", + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/RemoveMupdateOverrideBootSuccessInventory" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/RemoveMupdateOverrideBootSuccessInventory" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + }, + "non_boot_message": { + "description": "What happened on non-boot disks.\n\nWe aren't modeling this out in more detail, because we plan to not try and keep ledgered data in sync across both disks in the future.", + "type": "string" + } + }, + "required": [ + "boot_disk_result", + "non_boot_message" + ] + }, + "ResolvedVpcFirewallRule": { + "description": "VPC firewall rule after object name resolution has been performed by Nexus", + "type": "object", + "properties": { + "action": { + "$ref": "#/components/schemas/VpcFirewallRuleAction" + }, + "direction": { + "$ref": "#/components/schemas/VpcFirewallRuleDirection" + }, + "filter_hosts": { + 
"nullable": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/HostIdentifier" + }, + "uniqueItems": true + }, + "filter_ports": { + "nullable": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/L4PortRange" + } + }, + "filter_protocols": { + "nullable": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleProtocol" + } + }, + "priority": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "status": { + "$ref": "#/components/schemas/VpcFirewallRuleStatus" + }, + "targets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NetworkInterface" + } + } + }, + "required": [ + "action", + "direction", + "priority", + "status", + "targets" + ] + }, + "ResolvedVpcRoute": { + "description": "A VPC route resolved into a concrete target.", + "type": "object", + "properties": { + "dest": { + "$ref": "#/components/schemas/IpNet" + }, + "target": { + "$ref": "#/components/schemas/RouterTarget" + } + }, + "required": [ + "dest", + "target" + ] + }, + "ResolvedVpcRouteSet": { + "description": "An updated set of routes for a given VPC and/or subnet.", + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/RouterId" + }, + "routes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResolvedVpcRoute" + }, + "uniqueItems": true + }, + "version": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/RouterVersion" + } + ] + } + }, + "required": [ + "id", + "routes" + ] + }, + "ResolvedVpcRouteState": { + "description": "Version information for routes on a given VPC subnet.", + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/RouterId" + }, + "version": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/RouterVersion" + } + ] + } + }, + "required": [ + "id" + ] + }, + "RouteConfig": { + "type": "object", + "properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "nexthop": { + "description": "The nexthop/gateway address.", + "type": "string", + "format": "ip" + }, + "rib_priority": { + "nullable": true, + "description": "The RIB priority (i.e. 
Admin Distance) associated with this route.", + "default": null, + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN id associated with this route.", + "default": null, + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "destination", + "nexthop" + ] + }, + "RouterId": { + "description": "Identifier for a VPC and/or subnet.", + "type": "object", + "properties": { + "kind": { + "$ref": "#/components/schemas/RouterKind" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "kind", + "vni" + ] + }, + "RouterKind": { + "description": "The scope of a set of VPC router rules.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "system" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "subnet": { + "$ref": "#/components/schemas/IpNet" + }, + "type": { + "type": "string", + "enum": [ + "custom" + ] + } + }, + "required": [ + "subnet", + "type" + ] + } + ] + }, + "RouterTarget": { + "description": "The target for a given router entry.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "drop" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "internet_gateway" + ] + }, + "value": { + "$ref": "#/components/schemas/InternetGatewayRouterTarget" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc_subnet" + ] + }, + "value": { + "$ref": "#/components/schemas/IpNet" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "RouterVersion": { + "description": "Information on the current parent router (and version) of a route set according to the control plane.", + "type": "object", + "properties": { + "router_id": { + "type": "string", + "format": "uuid" + }, + "version": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "router_id", + "version" + ] + }, + "SerialPort": { + "description": "A serial port device.", + "type": "object", + "properties": { + "num": { + "description": "The serial port number for this port.", + "allOf": [ + { + "$ref": "#/components/schemas/SerialPortNumber" + } + ] + } + }, + "required": [ + "num" + ], + "additionalProperties": false + }, + "SerialPortNumber": { + "description": "A serial port identifier, which determines what I/O ports a guest can use to access a port.", + "type": "string", + "enum": [ + "com1", + "com2", + "com3", + "com4" + ] + }, + "SledCpuFamily": { + "description": "Identifies the kind of CPU present on a sled, determined by reading CPUID.\n\nThis is intended to broadly support the control plane answering the question \"can I run this instance on that sled?\" given an instance with either no or some CPU platform requirement. It is not enough information for more precise placement questions - for example, is a CPU a high-frequency part or many-core part? We don't include Genoa here, but in that CPU family there are high frequency parts, many-core parts, and large-cache parts. 
To support those questions (or satisfactorily answer #8730) we would need to collect additional information and send it along.", + "oneOf": [ + { + "description": "The CPU vendor or its family number don't correspond to any of the known family variants.", + "type": "string", + "enum": [ + "unknown" + ] + }, + { + "description": "AMD Milan processors (or very close). Could be an actual Milan in a Gimlet, a close-to-Milan client Zen 3 part, or Zen 4 (for which Milan is the greatest common denominator).", + "type": "string", + "enum": [ + "amd_milan" + ] + }, + { + "description": "AMD Turin processors (or very close). Could be an actual Turin in a Cosmo, or a close-to-Turin client Zen 5 part.", + "type": "string", + "enum": [ + "amd_turin" + ] + }, + { + "description": "AMD Turin Dense processors. There are no \"Turin Dense-like\" CPUs unlike other cases, so this means a bona fide Zen 5c Turin Dense part.", + "type": "string", + "enum": [ + "amd_turin_dense" + ] + } + ] + }, + "SledDiagnosticsQueryOutput": { + "oneOf": [ + { + "type": "object", + "properties": { + "success": { + "type": "object", + "properties": { + "command": { + "description": "The command and its arguments.", + "type": "string" + }, + "exit_code": { + "nullable": true, + "description": "The exit code if one was present when the command exited.", + "type": "integer", + "format": "int32" + }, + "exit_status": { + "description": "The exit status of the command. This will be the exit code (if any) and exit reason such as from a signal.", + "type": "string" + }, + "stdio": { + "description": "Any stdout/stderr produced by the command.", + "type": "string" + } + }, + "required": [ + "command", + "exit_status", + "stdio" + ] + } + }, + "required": [ + "success" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "failure": { + "type": "object", + "properties": { + "error": { + "description": "The reason the command failed to execute.", + "type": "string" + } + }, + "required": [ + "error" + ] + } + }, + "required": [ + "failure" + ], + "additionalProperties": false + } + ] + }, + "SledIdentifiers": { + "description": "Identifiers for a single sled.\n\nThis is intended primarily to be used in timeseries, to identify sled from which metric data originates.", + "type": "object", + "properties": { + "model": { + "description": "Model name of the sled", + "type": "string" + }, + "rack_id": { + "description": "Control plane ID of the rack this sled is a member of", + "type": "string", + "format": "uuid" + }, + "revision": { + "description": "Revision number of the sled", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "serial": { + "description": "Serial number of the sled", + "type": "string" + }, + "sled_id": { + "description": "Control plane ID for the sled itself", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "model", + "rack_id", + "revision", + "serial", + "sled_id" + ] + }, + "SledRole": { + "description": "Describes the role of the sled within the rack.\n\nNote that this may change if the sled is physically moved within the rack.", + "oneOf": [ + { + "description": "The sled is a general compute sled.", + "type": "string", + "enum": [ + "gimlet" + ] + }, + { + "description": "The sled is attached to the network switch, and has additional responsibilities.", + "type": "string", + "enum": [ + "scrimlet" + ] + } + ] + }, + "SledVmmState": { + "description": "A wrapper type containing a sled's total knowledge of the state of a VMM.", + "type": "object", + 
"properties": { + "migration_in": { + "nullable": true, + "description": "The current state of any inbound migration to this VMM.", + "allOf": [ + { + "$ref": "#/components/schemas/MigrationRuntimeState" + } + ] + }, + "migration_out": { + "nullable": true, + "description": "The state of any outbound migration from this VMM.", + "allOf": [ + { + "$ref": "#/components/schemas/MigrationRuntimeState" + } + ] + }, + "vmm_state": { + "description": "The most recent state of the sled's VMM process.", + "allOf": [ + { + "$ref": "#/components/schemas/VmmRuntimeState" + } + ] + } + }, + "required": [ + "vmm_state" + ] + }, + "SoftNpuP9": { + "description": "Describes a PCI device that shares host files with the guest using the P9 protocol.\n\nThis is only supported by Propolis servers compiled with the `falcon` feature.", + "type": "object", + "properties": { + "pci_path": { + "description": "The PCI path at which to attach the guest to this port.", + "allOf": [ + { + "$ref": "#/components/schemas/PciPath" + } + ] + } + }, + "required": [ + "pci_path" + ], + "additionalProperties": false + }, + "SoftNpuPciPort": { + "description": "Describes a SoftNPU PCI device.\n\nThis is only supported by Propolis servers compiled with the `falcon` feature.", + "type": "object", + "properties": { + "pci_path": { + "description": "The PCI path at which to attach the guest to this port.", + "allOf": [ + { + "$ref": "#/components/schemas/PciPath" + } + ] + } + }, + "required": [ + "pci_path" + ], + "additionalProperties": false + }, + "SoftNpuPort": { + "description": "Describes a port in a SoftNPU emulated ASIC.\n\nThis is only supported by Propolis servers compiled with the `falcon` feature.", + "type": "object", + "properties": { + "backend_id": { + "description": "The name of the port's associated DLPI backend.", + "allOf": [ + { + "$ref": "#/components/schemas/SpecKey" + } + ] + }, + "link_name": { + "description": "The data link name for this port.", + "type": "string" + } + }, + "required": [ + "backend_id", + "link_name" + ], + "additionalProperties": false + }, + "SourceNatConfig": { + "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", + "type": "object", + "properties": { + "first_port": { + "description": "The first port used for source NAT, inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "ip": { + "description": "The external address provided to the instance or service.", + "type": "string", + "format": "ip" + }, + "last_port": { + "description": "The last port used for source NAT, also inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "first_port", + "ip", + "last_port" + ] + }, + "SpecKey": { + "description": "A key identifying a component in an instance spec.", + "oneOf": [ + { + "title": "uuid", + "allOf": [ + { + "type": "string", + "format": "uuid" + } + ] + }, + { + "title": "name", + "allOf": [ + { + "type": "string" + } + ] + } + ] + }, + "StartSledAgentRequest": { + "description": "Configuration information for launching a Sled Agent.", + "type": "object", + "properties": { + "body": { + "$ref": "#/components/schemas/StartSledAgentRequestBody" + }, + "generation": { + "description": "The current generation number of data as stored in CRDB.\n\nThe initial generation is set during RSS time and then only mutated by Nexus. 
For now, we don't actually anticipate mutating this data, but we leave open the possibility.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "schema_version": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "body", + "generation", + "schema_version" + ] + }, + "StartSledAgentRequestBody": { + "description": "This is the actual app level data of `StartSledAgentRequest`\n\nWe nest it below the \"header\" of `generation` and `schema_version` so that we can perform partial deserialization of `EarlyNetworkConfig` to only read the header and defer deserialization of the body once we know the schema version. This is possible via the use of [`serde_json::value::RawValue`] in future (post-v1) deserialization paths.", + "type": "object", + "properties": { + "id": { + "description": "Uuid of the Sled Agent to be created.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + ] + }, + "is_lrtq_learner": { + "description": "Is this node an LRTQ learner node?\n\nWe only put the node into learner mode if `use_trust_quorum` is also true.", + "type": "boolean" + }, + "rack_id": { + "description": "Uuid of the rack to which this sled agent belongs.", + "type": "string", + "format": "uuid" + }, + "subnet": { + "description": "Portion of the IP space to be managed by the Sled Agent.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Subnet" + } + ] + }, + "use_trust_quorum": { + "description": "Use trust quorum for key generation", + "type": "boolean" + } + }, + "required": [ + "id", + "is_lrtq_learner", + "rack_id", + "subnet", + "use_trust_quorum" + ] + }, + "StorageLimit": { + "description": "The limit on space allowed for zone bundles, as a percentage of the overall dataset's quota.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "SupportBundleMetadata": { + "description": "Metadata about a support bundle", + "type": "object", + "properties": { + "state": { + "$ref": "#/components/schemas/SupportBundleState" + }, + "support_bundle_id": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + } + }, + "required": [ + "state", + "support_bundle_id" + ] + }, + "SupportBundleState": { + "type": "string", + "enum": [ + "complete", + "incomplete" + ] + }, + "SwitchLocation": { + "description": "Identifies switch physical location", + "oneOf": [ + { + "description": "Switch in upper slot", + "type": "string", + "enum": [ + "switch0" + ] + }, + { + "description": "Switch in lower slot", + "type": "string", + "enum": [ + "switch1" + ] + } + ] + }, + "SwitchPorts": { + "description": "A set of switch uplinks.", + "type": "object", + "properties": { + "uplinks": { + "type": "array", + "items": { + "$ref": "#/components/schemas/HostPortConfig" + } + } + }, + "required": [ + "uplinks" + ] + }, + "TxEqConfig": { + "description": "Per-port tx-eq overrides. 
This can be used to fine-tune the transceiver equalization settings to improve signal integrity.", + "type": "object", + "properties": { + "main": { + "nullable": true, + "description": "Main tap", + "type": "integer", + "format": "int32" + }, + "post1": { + "nullable": true, + "description": "Post-cursor tap1", + "type": "integer", + "format": "int32" + }, + "post2": { + "nullable": true, + "description": "Post-cursor tap2", + "type": "integer", + "format": "int32" + }, + "pre1": { + "nullable": true, + "description": "Pre-cursor tap1", + "type": "integer", + "format": "int32" + }, + "pre2": { + "nullable": true, + "description": "Pre-cursor tap2", + "type": "integer", + "format": "int32" + } + } + }, + "TypedUuidForDatasetKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForInstanceKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForInternalZpoolKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForMupdateKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForMupdateOverrideKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForOmicronZoneKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForPhysicalDiskKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForSledKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForSupportBundleKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForZpoolKind": { + "type": "string", + "format": "uuid" + }, + "UplinkAddressConfig": { + "type": "object", + "properties": { + "address": { + "$ref": "#/components/schemas/IpNet" + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN id (if any) associated with this address.", + "default": null, + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "address" + ] + }, + "VirtioDisk": { + "description": "A disk that presents a virtio-block interface to the guest.", + "type": "object", + "properties": { + "backend_id": { + "description": "The name of the disk's backend component.", + "allOf": [ + { + "$ref": "#/components/schemas/SpecKey" + } + ] + }, + "pci_path": { + "description": "The PCI bus/device/function at which this disk should be attached.", + "allOf": [ + { + "$ref": "#/components/schemas/PciPath" + } + ] + } + }, + "required": [ + "backend_id", + "pci_path" + ], + "additionalProperties": false + }, + "VirtioNetworkBackend": { + "description": "A network backend associated with a virtio-net (viona) VNIC on the host.", + "type": "object", + "properties": { + "vnic_name": { + "description": "The name of the viona VNIC to use as a backend.", + "type": "string" + } + }, + "required": [ + "vnic_name" + ], + "additionalProperties": false + }, + "VirtioNic": { + "description": "A network card that presents a virtio-net interface to the guest.", + "type": "object", + "properties": { + "backend_id": { + "description": "The name of the device's backend.", + "allOf": [ + { + "$ref": "#/components/schemas/SpecKey" + } + ] + }, + "interface_id": { + "description": "A caller-defined correlation identifier for this interface. 
If Propolis is configured to collect network interface kstats in its Oximeter metrics, the metric series for this interface will be associated with this identifier.", + "type": "string", + "format": "uuid" + }, + "pci_path": { + "description": "The PCI path at which to attach this device.", + "allOf": [ + { + "$ref": "#/components/schemas/PciPath" + } + ] + } + }, + "required": [ + "backend_id", + "interface_id", + "pci_path" + ], + "additionalProperties": false + }, + "VirtualNetworkInterfaceHost": { + "description": "A mapping from a virtual NIC to a physical host", + "type": "object", + "properties": { + "physical_host_ip": { + "type": "string", + "format": "ipv6" + }, + "virtual_ip": { + "type": "string", + "format": "ip" + }, + "virtual_mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "physical_host_ip", + "virtual_ip", + "virtual_mac", + "vni" + ] + }, + "VmmIssueDiskSnapshotRequestBody": { + "type": "object", + "properties": { + "snapshot_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "snapshot_id" + ] + }, + "VmmIssueDiskSnapshotRequestResponse": { + "type": "object", + "properties": { + "snapshot_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "snapshot_id" + ] + }, + "VmmPutStateBody": { + "description": "The body of a request to move a previously-ensured instance into a specific runtime state.", + "type": "object", + "properties": { + "state": { + "description": "The state into which the instance should be driven.", + "allOf": [ + { + "$ref": "#/components/schemas/VmmStateRequested" + } + ] + } + }, + "required": [ + "state" + ] + }, + "VmmPutStateResponse": { + "description": "The response sent from a request to move an instance into a specific runtime state.", + "type": "object", + "properties": { + "updated_runtime": { + "nullable": true, + "description": "The current runtime state of the instance after handling the request to change its state. 
If the instance's state did not change, this field is `None`.", + "allOf": [ + { + "$ref": "#/components/schemas/SledVmmState" + } + ] + } + } + }, + "VmmRuntimeState": { + "description": "The dynamic runtime properties of an individual VMM process.", + "type": "object", + "properties": { + "gen": { + "description": "The generation number for this VMM's state.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "state": { + "description": "The last state reported by this VMM.", + "allOf": [ + { + "$ref": "#/components/schemas/VmmState" + } + ] + }, + "time_updated": { + "description": "Timestamp for the VMM's state.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "gen", + "state", + "time_updated" + ] + }, + "VmmSpec": { + "description": "Specifies the virtual hardware configuration of a new Propolis VMM in the form of a Propolis instance specification.\n\nSled-agent expects that when an instance spec is provided alongside an `InstanceSledLocalConfig` to initialize a new instance, the NIC IDs in that config's network interface list will match the IDs of the virtio network backends in the instance spec.", + "allOf": [ + { + "$ref": "#/components/schemas/InstanceSpecV0" + } + ] + }, + "VmmState": { + "description": "One of the states that a VMM can be in.", + "oneOf": [ + { + "description": "The VMM is initializing and has not started running guest CPUs yet.", + "type": "string", + "enum": [ + "starting" + ] + }, + { + "description": "The VMM has finished initializing and may be running guest CPUs.", + "type": "string", + "enum": [ + "running" + ] + }, + { + "description": "The VMM is shutting down.", + "type": "string", + "enum": [ + "stopping" + ] + }, + { + "description": "The VMM's guest has stopped, and the guest will not run again, but the VMM process may not have released all of its resources yet.", + "type": "string", + "enum": [ + "stopped" + ] + }, + { + "description": "The VMM is being restarted or its guest OS is rebooting.", + "type": "string", + "enum": [ + "rebooting" + ] + }, + { + "description": "The VMM is part of a live migration.", + "type": "string", + "enum": [ + "migrating" + ] + }, + { + "description": "The VMM process reported an internal failure.", + "type": "string", + "enum": [ + "failed" + ] + }, + { + "description": "The VMM process has been destroyed and its resources have been released.", + "type": "string", + "enum": [ + "destroyed" + ] + } + ] + }, + "VmmStateRequested": { + "description": "Requestable running state of an Instance.\n\nA subset of [`omicron_common::api::external::InstanceState`].", + "oneOf": [ + { + "description": "Run this instance by migrating in from a previous running incarnation of the instance.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "migration_target" + ] + }, + "value": { + "$ref": "#/components/schemas/InstanceMigrationTargetParams" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Start the instance if it is not already running.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "running" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Stop the instance.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "stopped" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Immediately reset the instance, as though it had stopped and immediately began to run again.", + "type": "object", + "properties": { + "type": 
{ + "type": "string", + "enum": [ + "reboot" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "VmmUnregisterResponse": { + "description": "The response sent from a request to unregister an instance.", + "type": "object", + "properties": { + "updated_runtime": { + "nullable": true, + "description": "The current state of the instance after handling the request to unregister it. If the instance's state did not change, this field is `None`.", + "allOf": [ + { + "$ref": "#/components/schemas/SledVmmState" + } + ] + } + } + }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "VpcFirewallIcmpFilter": { + "type": "object", + "properties": { + "code": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/IcmpParamRange" + } + ] + }, + "icmp_type": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "icmp_type" + ] + }, + "VpcFirewallRuleAction": { + "type": "string", + "enum": [ + "allow", + "deny" + ] + }, + "VpcFirewallRuleDirection": { + "type": "string", + "enum": [ + "inbound", + "outbound" + ] + }, + "VpcFirewallRuleProtocol": { + "description": "The protocols that may be specified in a firewall rule's filter", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "tcp" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "udp" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "icmp" + ] + }, + "value": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallIcmpFilter" + } + ] + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "VpcFirewallRuleStatus": { + "type": "string", + "enum": [ + "disabled", + "enabled" + ] + }, + "VpcFirewallRulesEnsureBody": { + "description": "Update firewall rules for a VPC", + "type": "object", + "properties": { + "rules": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ResolvedVpcFirewallRule" + } + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "rules", + "vni" + ] + }, + "ZoneArtifactInventory": { + "description": "Inventory representation of a single zone artifact on a boot disk.\n\nPart of [`ZoneManifestBootInventory`].", + "type": "object", + "properties": { + "expected_hash": { + "description": "The expected digest of the file's contents.", + "type": "string", + "format": "hex string (32 bytes)" + }, + "expected_size": { + "description": "The expected size of the file, in bytes.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "file_name": { + "description": "The name of the zone file on disk, for example `nexus.tar.gz`. 
Zone files are always \".tar.gz\".", + "type": "string" + }, + "path": { + "description": "The full path to the zone file.", + "type": "string", + "format": "Utf8PathBuf" + }, + "status": { + "description": "The status of the artifact.\n\nThis is `Ok(())` if the artifact is present and matches the expected size and digest, or an error message if it is missing or does not match.", + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "type": "string", + "enum": [ + null + ] + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + } + }, + "required": [ + "expected_hash", + "expected_size", + "file_name", + "path", + "status" + ] + }, + "ZoneBundleCause": { + "description": "The reason or cause for a zone bundle, i.e., why it was created.", + "oneOf": [ + { + "description": "Some other, unspecified reason.", + "type": "string", + "enum": [ + "other" + ] + }, + { + "description": "A zone bundle taken when a sled agent finds a zone that it does not expect to be running.", + "type": "string", + "enum": [ + "unexpected_zone" + ] + }, + { + "description": "An instance zone was terminated.", + "type": "string", + "enum": [ + "terminated_instance" + ] + } + ] + }, + "ZoneBundleId": { + "description": "An identifier for a zone bundle.", + "type": "object", + "properties": { + "bundle_id": { + "description": "The ID for this bundle itself.", + "type": "string", + "format": "uuid" + }, + "zone_name": { + "description": "The name of the zone this bundle is derived from.", + "type": "string" + } + }, + "required": [ + "bundle_id", + "zone_name" + ] + }, + "ZoneBundleMetadata": { + "description": "Metadata about a zone bundle.", + "type": "object", + "properties": { + "cause": { + "description": "The reason or cause a bundle was created.", + "allOf": [ + { + "$ref": "#/components/schemas/ZoneBundleCause" + } + ] + }, + "id": { + "description": "Identifier for this zone bundle", + "allOf": [ + { + "$ref": "#/components/schemas/ZoneBundleId" + } + ] + }, + "time_created": { + "description": "The time at which this zone bundle was created.", + "type": "string", + "format": "date-time" + }, + "version": { + "description": "A version number for this zone bundle.", + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "cause", + "id", + "time_created", + "version" + ] + }, + "ZoneImageResolverInventory": { + "description": "Inventory representation of zone image resolver status and health.", + "type": "object", + "properties": { + "mupdate_override": { + "description": "The mupdate override status.", + "allOf": [ + { + "$ref": "#/components/schemas/MupdateOverrideInventory" + } + ] + }, + "zone_manifest": { + "description": "The zone manifest status.", + "allOf": [ + { + "$ref": "#/components/schemas/ZoneManifestInventory" + } + ] + } + }, + "required": [ + "mupdate_override", + "zone_manifest" + ] + }, + "ZoneManifestBootInventory": { + "description": "Inventory representation of zone artifacts on the boot disk.\n\nPart of [`ZoneManifestInventory`].", + "type": "object", + "properties": { + "artifacts": { + "title": "IdOrdMap", + "description": "The artifacts on disk.", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/ZoneArtifactInventory" + } + ], + 
"path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/ZoneArtifactInventory" + }, + "uniqueItems": true + }, + "source": { + "description": "The manifest source.\n\nIn production this is [`OmicronZoneManifestSource::Installinator`], but in some development and testing flows Sled Agent synthesizes zone manifests. In those cases, the source is [`OmicronZoneManifestSource::SledAgent`].", + "allOf": [ + { + "$ref": "#/components/schemas/OmicronZoneManifestSource" + } + ] + } + }, + "required": [ + "artifacts", + "source" + ] + }, + "ZoneManifestInventory": { + "description": "Inventory representation of a zone manifest.\n\nPart of [`ZoneImageResolverInventory`].\n\nA zone manifest is a listing of all the zones present in a system's install dataset. This struct contains information about the install dataset gathered from a system.", + "type": "object", + "properties": { + "boot_disk_path": { + "description": "The full path to the zone manifest file on the boot disk.", + "type": "string", + "format": "Utf8PathBuf" + }, + "boot_inventory": { + "description": "The manifest read from the boot disk, and whether the manifest is valid.", + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/ZoneManifestBootInventory" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/ZoneManifestBootInventory" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + }, + "non_boot_status": { + "title": "IdOrdMap", + "description": "Information about the install dataset on non-boot disks.", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/ZoneManifestNonBootInventory" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/ZoneManifestNonBootInventory" + }, + "uniqueItems": true + } + }, + "required": [ + "boot_disk_path", + "boot_inventory", + "non_boot_status" + ] + }, + "ZoneManifestNonBootInventory": { + "description": "Inventory representation of a zone manifest on a non-boot disk.\n\nUnlike [`ZoneManifestBootInventory`] which is structured since Reconfigurator makes decisions based on it, information about non-boot disks is purely advisory. For simplicity, we store information in an unstructured format.", + "type": "object", + "properties": { + "is_valid": { + "description": "Whether the status is valid.", + "type": "boolean" + }, + "message": { + "description": "A message describing the status.\n\nIf `is_valid` is true, then the message describes the list of artifacts found and their hashes.\n\nIf `is_valid` is false, then this message describes the reason for the invalid status. This could include errors reading the zone manifest, or zone file mismatches.", + "type": "string" + }, + "path": { + "description": "The full path to the zone manifest JSON on the non-boot disk.", + "type": "string", + "format": "Utf8PathBuf" + }, + "zpool_id": { + "description": "The ID of the non-boot zpool.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForInternalZpoolKind" + } + ] + } + }, + "required": [ + "is_valid", + "message", + "path", + "zpool_id" + ] + }, + "ZpoolName": { + "title": "The name of a Zpool", + "description": "Zpool names are of the format ox{i,p}_. 
They are either Internal or External, and should be unique", + "type": "string", + "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + }, + "TypedUuidForPropolisKind": { + "type": "string", + "format": "uuid" + } + }, + "responses": { + "Error": { + "description": "Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } +} diff --git a/openapi/sled-agent/sled-agent-latest.json b/openapi/sled-agent/sled-agent-latest.json index b653d865fc0..381144dab9a 120000 --- a/openapi/sled-agent/sled-agent-latest.json +++ b/openapi/sled-agent/sled-agent-latest.json @@ -1 +1 @@ -sled-agent-3.0.0-f44f77.json \ No newline at end of file +sled-agent-4.0.0-fd6727.json \ No newline at end of file diff --git a/schema/all-zones-requests.json b/schema/all-zones-requests.json index e9400245755..7b07affc08d 100644 --- a/schema/all-zones-requests.json +++ b/schema/all-zones-requests.json @@ -642,6 +642,13 @@ "description": "The address at which the internal nexus server is reachable.", "type": "string" }, + "lockstep_port": { + "description": "The port at which the internal lockstep server is reachable. This shares the same IP address with `internal_address`.", + "default": 12232, + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, "nic": { "description": "The service vNIC providing external connectivity using OPTE.", "allOf": [ diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index dc3f479efa3..5126e39944b 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -4329,6 +4329,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_omicron_sled_config_zone ( image_source omicron.public.inv_zone_image_source NOT NULL, image_artifact_sha256 STRING(64), + -- Nexus lockstep service port, used only by Nexus zones + nexus_lockstep_port INT4 + CHECK (nexus_lockstep_port IS NULL OR nexus_lockstep_port BETWEEN 0 AND 65535), + CONSTRAINT zone_image_source_artifact_hash_present CHECK ( (image_source = 'artifact' AND image_artifact_sha256 IS NOT NULL) @@ -4337,6 +4341,12 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_omicron_sled_config_zone ( AND image_artifact_sha256 IS NULL) ), + CONSTRAINT nexus_lockstep_port_for_nexus_zones CHECK ( + (zone_type = 'nexus' AND nexus_lockstep_port IS NOT NULL) + OR + (zone_type != 'nexus' AND nexus_lockstep_port IS NULL) + ), + PRIMARY KEY (inv_collection_id, sled_config_id, id) ); @@ -4782,6 +4792,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone ( -- Generation for Nexus zones nexus_generation INT8, + -- Nexus lockstep service port, used only by Nexus zones + nexus_lockstep_port INT4 + CHECK (nexus_lockstep_port IS NULL OR nexus_lockstep_port BETWEEN 0 AND 65535), + PRIMARY KEY (blueprint_id, id), CONSTRAINT expunged_disposition_properties CHECK ( @@ -4805,6 +4819,12 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone ( (zone_type = 'nexus' AND nexus_generation IS NOT NULL) OR (zone_type != 'nexus' AND nexus_generation IS NULL) + ), + + CONSTRAINT nexus_lockstep_port_for_nexus_zones CHECK ( + (zone_type = 'nexus' AND nexus_lockstep_port IS NOT NULL) + OR + (zone_type != 'nexus' AND nexus_lockstep_port IS NULL) ) ); @@ -6668,7 +6688,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '192.0.0', NULL) + (TRUE, NOW(), NOW(), '193.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/nexus-lockstep-port/up01.sql b/schema/crdb/nexus-lockstep-port/up01.sql new file mode 
100644 index 00000000000..a465ac8168c --- /dev/null +++ b/schema/crdb/nexus-lockstep-port/up01.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.bp_omicron_zone ADD COLUMN IF NOT EXISTS nexus_lockstep_port INT4 + CHECK (nexus_lockstep_port IS NULL OR nexus_lockstep_port BETWEEN 0 AND 65535); diff --git a/schema/crdb/nexus-lockstep-port/up02.sql b/schema/crdb/nexus-lockstep-port/up02.sql new file mode 100644 index 00000000000..40ead8a0ded --- /dev/null +++ b/schema/crdb/nexus-lockstep-port/up02.sql @@ -0,0 +1,5 @@ +SET LOCAL disallow_full_table_scans = off; + +UPDATE omicron.public.bp_omicron_zone +SET nexus_lockstep_port = 12232 +WHERE zone_type = 'nexus'; diff --git a/schema/crdb/nexus-lockstep-port/up03.sql b/schema/crdb/nexus-lockstep-port/up03.sql new file mode 100644 index 00000000000..920e12a3e5e --- /dev/null +++ b/schema/crdb/nexus-lockstep-port/up03.sql @@ -0,0 +1,5 @@ +ALTER TABLE omicron.public.bp_omicron_zone ADD CONSTRAINT IF NOT EXISTS nexus_lockstep_port_for_nexus_zones CHECK ( + (zone_type = 'nexus' AND nexus_lockstep_port IS NOT NULL) + OR + (zone_type != 'nexus' AND nexus_lockstep_port IS NULL) +) diff --git a/schema/crdb/nexus-lockstep-port/up04.sql b/schema/crdb/nexus-lockstep-port/up04.sql new file mode 100644 index 00000000000..d18c97f0e2f --- /dev/null +++ b/schema/crdb/nexus-lockstep-port/up04.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.inv_omicron_sled_config_zone ADD COLUMN IF NOT EXISTS nexus_lockstep_port INT4 + CHECK (nexus_lockstep_port IS NULL OR nexus_lockstep_port BETWEEN 0 AND 65535); diff --git a/schema/crdb/nexus-lockstep-port/up05.sql b/schema/crdb/nexus-lockstep-port/up05.sql new file mode 100644 index 00000000000..193576643e8 --- /dev/null +++ b/schema/crdb/nexus-lockstep-port/up05.sql @@ -0,0 +1,5 @@ +SET LOCAL disallow_full_table_scans = off; + +UPDATE omicron.public.inv_omicron_sled_config_zone +SET nexus_lockstep_port = 12232 +WHERE zone_type = 'nexus'; diff --git a/schema/crdb/nexus-lockstep-port/up06.sql b/schema/crdb/nexus-lockstep-port/up06.sql new file mode 100644 index 00000000000..67008b93bfe --- /dev/null +++ b/schema/crdb/nexus-lockstep-port/up06.sql @@ -0,0 +1,5 @@ +ALTER TABLE omicron.public.inv_omicron_sled_config_zone ADD CONSTRAINT IF NOT EXISTS nexus_lockstep_port_for_nexus_zones CHECK ( + (zone_type = 'nexus' AND nexus_lockstep_port IS NOT NULL) + OR + (zone_type != 'nexus' AND nexus_lockstep_port IS NULL) +) diff --git a/sled-agent/api/Cargo.toml b/sled-agent/api/Cargo.toml index eb682524e57..581a7d23aa4 100644 --- a/sled-agent/api/Cargo.toml +++ b/sled-agent/api/Cargo.toml @@ -9,18 +9,21 @@ workspace = true [dependencies] camino.workspace = true +chrono.workspace = true dropshot.workspace = true http.workspace = true +id-map.workspace = true +iddqd.workspace = true nexus-sled-agent-shared.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true omicron-workspace-hack.workspace = true +openapi-manager-types.workspace = true schemars.workspace = true +semver.workspace = true serde.workspace = true sled-agent-types.workspace = true -sled-hardware-types.workspace = true sled-diagnostics.workspace = true +sled-hardware-types.workspace = true tufaceous-artifact.workspace = true uuid.workspace = true -openapi-manager-types.workspace = true -semver.workspace = true diff --git a/sled-agent/api/src/lib.rs b/sled-agent/api/src/lib.rs index 1eb4c8395bd..55800ca2971 100644 --- a/sled-agent/api/src/lib.rs +++ b/sled-agent/api/src/lib.rs @@ -54,6 +54,9 @@ use sled_diagnostics::SledDiagnosticsQueryOutput; use 
tufaceous_artifact::ArtifactHash; use uuid::Uuid; +/// Copies of data types that changed between v3 and v4. +mod v3; + api_versions!([ // WHEN CHANGING THE API (part 1 of 2): // @@ -66,6 +69,7 @@ api_versions!([ // | example for the next person. // v // (next_int, IDENT), + (4, ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY), (3, ADD_SWITCH_ZONE_OPERATOR_POLICY), (2, REMOVE_DESTROY_ORPHANED_DATASETS_CHICKEN_SWITCH), (1, INITIAL), @@ -323,12 +327,26 @@ pub trait SledAgentApi { #[endpoint { method = PUT, path = "/omicron-config", + versions = VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY.., }] async fn omicron_config_put( rqctx: RequestContext, body: TypedBody, ) -> Result; + #[endpoint { + operation_id = "omicron_config_put", + method = PUT, + path = "/omicron-config", + versions = ..VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY, + }] + async fn v3_omicron_config_put( + rqctx: RequestContext, + body: TypedBody, + ) -> Result { + Self::omicron_config_put(rqctx, body.map(Into::into)).await + } + #[endpoint { method = GET, path = "/sled-role", @@ -551,11 +569,26 @@ pub trait SledAgentApi { #[endpoint { method = GET, path = "/inventory", + versions = VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY.., }] async fn inventory( rqctx: RequestContext, ) -> Result, HttpError>; + /// Fetch basic information about this sled + #[endpoint { + operation_id = "inventory", + method = GET, + path = "/inventory", + versions = ..VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY, + }] + async fn v3_inventory( + rqctx: RequestContext, + ) -> Result, HttpError> { + let HttpResponseOk(inventory) = Self::inventory(rqctx).await?; + Ok(HttpResponseOk(inventory.into())) + } + /// Fetch sled identifiers #[endpoint { method = GET, diff --git a/sled-agent/api/src/v3.rs b/sled-agent/api/src/v3.rs new file mode 100644 index 00000000000..4390cf36c60 --- /dev/null +++ b/sled-agent/api/src/v3.rs @@ -0,0 +1,486 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
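// How the version adapters in lib.rs compose with the types below (a
// sketch, relying only on the endpoints shown above and the `From` impls
// defined later in this module):
//
// * Requests negotiated at VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY or
//   later are routed to `omicron_config_put` / `inventory` and use the
//   current types directly.
// * Older requests are routed to `v3_omicron_config_put` / `v3_inventory`,
//   which share the same `operation_id` and whose default bodies convert
//   between the frozen v3 types in this module and the current ones before
//   delegating:
//
//     // PUT /omicron-config: v3 request body -> current body
//     let config: inventory::OmicronSledConfig = v3_config.into();
//     // v3 has no `lockstep_port`; the conversion fills in
//     // NEXUS_LOCKSTEP_PORT for Nexus zones.
//
//     // GET /inventory: current response -> v3 response
//     let old: Inventory = current_inventory.into();
//     // the lockstep port is simply dropped on the way down.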
+ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV6}; +use std::time::Duration; + +use chrono::{DateTime, Utc}; +use id_map::{IdMap, IdMappable}; +use iddqd::IdOrdMap; +use nexus_sled_agent_shared::inventory::{ + self, BootPartitionContents, ConfigReconcilerInventoryResult, + HostPhase2DesiredSlots, InventoryDataset, InventoryDisk, InventoryZpool, + OmicronZoneDataset, OmicronZoneImageSource, OrphanedDataset, + RemoveMupdateOverrideInventory, SledRole, ZoneImageResolverInventory, +}; +use omicron_common::address::NEXUS_LOCKSTEP_PORT; +use omicron_common::{ + api::external::{ByteCount, Generation}, + api::internal::shared::{NetworkInterface, SourceNatConfig}, + disk::{DatasetConfig, OmicronPhysicalDiskConfig}, + zpool_name::ZpoolName, +}; +use omicron_uuid_kinds::{ + DatasetUuid, MupdateOverrideUuid, OmicronZoneUuid, PhysicalDiskUuid, + SledUuid, +}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use sled_hardware_types::{Baseboard, SledCpuFamily}; + +/// Identity and basic status information about this sled agent +#[derive(Deserialize, Serialize, JsonSchema)] +pub struct Inventory { + pub sled_id: SledUuid, + pub sled_agent_address: SocketAddrV6, + pub sled_role: SledRole, + pub baseboard: Baseboard, + pub usable_hardware_threads: u32, + pub usable_physical_ram: ByteCount, + pub cpu_family: SledCpuFamily, + pub reservoir_size: ByteCount, + pub disks: Vec, + pub zpools: Vec, + pub datasets: Vec, + pub ledgered_sled_config: Option, + pub reconciler_status: ConfigReconcilerInventoryStatus, + pub last_reconciliation: Option, + pub zone_image_resolver: ZoneImageResolverInventory, +} + +impl From for Inventory { + fn from(value: inventory::Inventory) -> Self { + Self { + sled_id: value.sled_id, + sled_agent_address: value.sled_agent_address, + sled_role: value.sled_role, + baseboard: value.baseboard, + usable_hardware_threads: value.usable_hardware_threads, + usable_physical_ram: value.usable_physical_ram, + cpu_family: value.cpu_family, + reservoir_size: value.reservoir_size, + disks: value.disks, + zpools: value.zpools, + datasets: value.datasets, + ledgered_sled_config: value.ledgered_sled_config.map(Into::into), + reconciler_status: value.reconciler_status.into(), + last_reconciliation: value.last_reconciliation.map(Into::into), + zone_image_resolver: value.zone_image_resolver, + } + } +} + +/// Describes the set of Reconfigurator-managed configuration elements of a sled +#[derive(Deserialize, Serialize, JsonSchema)] +pub struct OmicronSledConfig { + pub generation: Generation, + pub disks: IdMap, + pub datasets: IdMap, + pub zones: IdMap, + pub remove_mupdate_override: Option, + #[serde(default = "HostPhase2DesiredSlots::current_contents")] + pub host_phase_2: HostPhase2DesiredSlots, +} + +impl From for inventory::OmicronSledConfig { + fn from(value: OmicronSledConfig) -> Self { + Self { + generation: value.generation, + disks: value.disks, + datasets: value.datasets, + zones: value.zones.into_iter().map(Into::into).collect(), + remove_mupdate_override: value.remove_mupdate_override, + host_phase_2: value.host_phase_2, + } + } +} + +impl From for OmicronSledConfig { + fn from(value: inventory::OmicronSledConfig) -> Self { + Self { + generation: value.generation, + disks: value.disks, + datasets: value.datasets, + zones: value.zones.into_iter().map(Into::into).collect(), + remove_mupdate_override: value.remove_mupdate_override, + host_phase_2: value.host_phase_2, + } + } +} + +/// Describes one Omicron-managed zone running on a 
sled +#[derive(Deserialize, Serialize, JsonSchema)] +pub struct OmicronZoneConfig { + pub id: OmicronZoneUuid, + + /// The pool on which we'll place this zone's root filesystem. + /// + /// Note that the root filesystem is transient -- the sled agent is + /// permitted to destroy this dataset each time the zone is initialized. + pub filesystem_pool: Option, + pub zone_type: OmicronZoneType, + // Use `InstallDataset` if this field is not present in a deserialized + // blueprint or ledger. + #[serde(default = "OmicronZoneImageSource::deserialize_default")] + pub image_source: OmicronZoneImageSource, +} + +impl IdMappable for OmicronZoneConfig { + type Id = OmicronZoneUuid; + + fn id(&self) -> Self::Id { + self.id + } +} + +impl From for inventory::OmicronZoneConfig { + fn from(value: OmicronZoneConfig) -> Self { + Self { + id: value.id, + filesystem_pool: value.filesystem_pool, + zone_type: value.zone_type.into(), + image_source: value.image_source, + } + } +} + +impl From for OmicronZoneConfig { + fn from(value: inventory::OmicronZoneConfig) -> Self { + Self { + id: value.id, + filesystem_pool: value.filesystem_pool, + zone_type: value.zone_type.into(), + image_source: value.image_source, + } + } +} + +/// Describes what kind of zone this is (i.e., what component is running in it) +/// as well as any type-specific configuration +#[derive(Deserialize, Serialize, JsonSchema)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum OmicronZoneType { + BoundaryNtp { + address: SocketAddrV6, + ntp_servers: Vec, + dns_servers: Vec, + domain: Option, + /// The service vNIC providing outbound connectivity using OPTE. + nic: NetworkInterface, + /// The SNAT configuration for outbound connections. + snat_cfg: SourceNatConfig, + }, + + /// Type of clickhouse zone used for a single node clickhouse deployment + Clickhouse { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + + /// A zone used to run a Clickhouse Keeper node + /// + /// Keepers are only used in replicated clickhouse setups + ClickhouseKeeper { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + + /// A zone used to run a Clickhouse Server in a replicated deployment + ClickhouseServer { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + + CockroachDb { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + + Crucible { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + CruciblePantry { + address: SocketAddrV6, + }, + ExternalDns { + dataset: OmicronZoneDataset, + /// The address at which the external DNS server API is reachable. + http_address: SocketAddrV6, + /// The address at which the external DNS server is reachable. + dns_address: SocketAddr, + /// The service vNIC providing external connectivity using OPTE. + nic: NetworkInterface, + }, + InternalDns { + dataset: OmicronZoneDataset, + http_address: SocketAddrV6, + dns_address: SocketAddrV6, + /// The addresses in the global zone which should be created + /// + /// For the DNS service, which exists outside the sleds's typical subnet + /// - adding an address in the GZ is necessary to allow inter-zone + /// traffic routing. + gz_address: Ipv6Addr, + + /// The address is also identified with an auxiliary bit of information + /// to ensure that the created global zone address can have a unique + /// name. + gz_address_index: u32, + }, + InternalNtp { + address: SocketAddrV6, + }, + Nexus { + /// The address at which the internal nexus server is reachable. 
+ internal_address: SocketAddrV6, + /// The address at which the external nexus server is reachable. + external_ip: IpAddr, + /// The service vNIC providing external connectivity using OPTE. + nic: NetworkInterface, + /// Whether Nexus's external endpoint should use TLS + external_tls: bool, + /// External DNS servers Nexus can use to resolve external hosts. + external_dns_servers: Vec, + }, + Oximeter { + address: SocketAddrV6, + }, +} + +impl From for inventory::OmicronZoneType { + fn from(value: OmicronZoneType) -> Self { + match value { + OmicronZoneType::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + snat_cfg, + } => Self::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + snat_cfg, + }, + OmicronZoneType::Clickhouse { address, dataset } => { + Self::Clickhouse { address, dataset } + } + OmicronZoneType::ClickhouseKeeper { address, dataset } => { + Self::ClickhouseKeeper { address, dataset } + } + OmicronZoneType::ClickhouseServer { address, dataset } => { + Self::ClickhouseServer { address, dataset } + } + OmicronZoneType::CockroachDb { address, dataset } => { + Self::CockroachDb { address, dataset } + } + OmicronZoneType::Crucible { address, dataset } => { + Self::Crucible { address, dataset } + } + OmicronZoneType::CruciblePantry { address } => { + Self::CruciblePantry { address } + } + OmicronZoneType::ExternalDns { + dataset, + http_address, + dns_address, + nic, + } => Self::ExternalDns { dataset, http_address, dns_address, nic }, + OmicronZoneType::InternalDns { + dataset, + http_address, + dns_address, + gz_address, + gz_address_index, + } => Self::InternalDns { + dataset, + http_address, + dns_address, + gz_address, + gz_address_index, + }, + OmicronZoneType::InternalNtp { address } => { + Self::InternalNtp { address } + } + OmicronZoneType::Nexus { + internal_address, + external_ip, + nic, + external_tls, + external_dns_servers, + } => Self::Nexus { + internal_address, + lockstep_port: NEXUS_LOCKSTEP_PORT, + external_ip, + nic, + external_tls, + external_dns_servers, + }, + OmicronZoneType::Oximeter { address } => Self::Oximeter { address }, + } + } +} + +impl From for OmicronZoneType { + fn from(value: inventory::OmicronZoneType) -> Self { + match value { + inventory::OmicronZoneType::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + snat_cfg, + } => Self::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + snat_cfg, + }, + inventory::OmicronZoneType::Clickhouse { address, dataset } => { + Self::Clickhouse { address, dataset } + } + inventory::OmicronZoneType::ClickhouseKeeper { + address, + dataset, + } => Self::ClickhouseKeeper { address, dataset }, + inventory::OmicronZoneType::ClickhouseServer { + address, + dataset, + } => Self::ClickhouseServer { address, dataset }, + inventory::OmicronZoneType::CockroachDb { address, dataset } => { + Self::CockroachDb { address, dataset } + } + inventory::OmicronZoneType::Crucible { address, dataset } => { + Self::Crucible { address, dataset } + } + inventory::OmicronZoneType::CruciblePantry { address } => { + Self::CruciblePantry { address } + } + inventory::OmicronZoneType::ExternalDns { + dataset, + http_address, + dns_address, + nic, + } => Self::ExternalDns { dataset, http_address, dns_address, nic }, + inventory::OmicronZoneType::InternalDns { + dataset, + http_address, + dns_address, + gz_address, + gz_address_index, + } => Self::InternalDns { + dataset, + http_address, + dns_address, + gz_address, + gz_address_index, + }, + 
inventory::OmicronZoneType::InternalNtp { address } => { + Self::InternalNtp { address } + } + inventory::OmicronZoneType::Nexus { + internal_address, + lockstep_port: _, + external_ip, + nic, + external_tls, + external_dns_servers, + } => Self::Nexus { + internal_address, + external_ip, + nic, + external_tls, + external_dns_servers, + }, + inventory::OmicronZoneType::Oximeter { address } => { + Self::Oximeter { address } + } + } + } +} + +/// Describes the last attempt made by the sled-agent-config-reconciler to +/// reconcile the current sled config against the actual state of the sled. +#[derive(Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub struct ConfigReconcilerInventory { + pub last_reconciled_config: OmicronSledConfig, + pub external_disks: + BTreeMap, + pub datasets: BTreeMap, + pub orphaned_datasets: IdOrdMap, + pub zones: BTreeMap, + pub boot_partitions: BootPartitionContents, + /// The result of removing the mupdate override file on disk. + /// + /// `None` if `remove_mupdate_override` was not provided in the sled config. + pub remove_mupdate_override: Option, +} + +impl From for ConfigReconcilerInventory { + fn from(value: inventory::ConfigReconcilerInventory) -> Self { + Self { + last_reconciled_config: value.last_reconciled_config.into(), + external_disks: value.external_disks, + datasets: value.datasets, + orphaned_datasets: value.orphaned_datasets, + zones: value.zones, + boot_partitions: value.boot_partitions, + remove_mupdate_override: value.remove_mupdate_override, + } + } +} + +/// Status of the sled-agent-config-reconciler task. +#[derive(Deserialize, Serialize, JsonSchema)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ConfigReconcilerInventoryStatus { + /// The reconciler task has not yet run for the first time since sled-agent + /// started. + NotYetRun, + /// The reconciler task is actively running. + Running { + config: OmicronSledConfig, + started_at: DateTime, + running_for: Duration, + }, + /// The reconciler task is currently idle, but previously did complete a + /// reconciliation attempt. + /// + /// This variant does not include the `OmicronSledConfig` used in the last + /// attempt, because that's always available via + /// [`ConfigReconcilerInventory::last_reconciled_config`]. 
+ Idle { completed_at: DateTime, ran_for: Duration }, +} + +impl From + for ConfigReconcilerInventoryStatus +{ + fn from(value: inventory::ConfigReconcilerInventoryStatus) -> Self { + match value { + inventory::ConfigReconcilerInventoryStatus::NotYetRun => { + Self::NotYetRun + } + inventory::ConfigReconcilerInventoryStatus::Running { + config, + started_at, + running_for, + } => { + Self::Running { config: config.into(), started_at, running_for } + } + inventory::ConfigReconcilerInventoryStatus::Idle { + completed_at, + ran_for, + } => Self::Idle { completed_at, ran_for }, + } + } +} diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 37ea4757d9f..172a2630359 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -22,9 +22,9 @@ use nexus_types::deployment::{ }; use omicron_common::address::{ DENDRITE_PORT, DNS_HTTP_PORT, DNS_PORT, Ipv6Subnet, MGD_PORT, MGS_PORT, - NEXUS_INTERNAL_PORT, NTP_PORT, NUM_SOURCE_NAT_PORTS, REPO_DEPOT_PORT, - RSS_RESERVED_ADDRESSES, ReservedRackSubnet, SLED_PREFIX, get_sled_address, - get_switch_zone_address, + NEXUS_INTERNAL_PORT, NEXUS_LOCKSTEP_PORT, NTP_PORT, NUM_SOURCE_NAT_PORTS, + REPO_DEPOT_PORT, RSS_RESERVED_ADDRESSES, ReservedRackSubnet, SLED_PREFIX, + get_sled_address, get_switch_zone_address, }; use omicron_common::api::external::{Generation, MacAddr, Vni}; use omicron_common::api::internal::shared::{ @@ -544,11 +544,7 @@ impl Plan { let internal_address = SocketAddrV6::new(ip, NEXUS_INTERNAL_PORT, 0, 0); dns_builder - .host_zone_with_one_backend( - id, - ServiceName::Nexus, - internal_address, - ) + .host_zone_nexus(id, internal_address, NEXUS_LOCKSTEP_PORT) .unwrap(); let (nic, external_ip) = svc_port_builder.next_nexus(id)?; let filesystem_pool = sled.alloc_zpool_from_u2s()?; @@ -558,6 +554,7 @@ impl Plan { zone_type: BlueprintZoneType::Nexus( blueprint_zone_type::Nexus { internal_address, + lockstep_port: NEXUS_LOCKSTEP_PORT, external_ip: from_ipaddr_to_external_floating_ip( external_ip, ), diff --git a/sled-agent/src/server.rs b/sled-agent/src/server.rs index 99bfae05ee5..5706bf717f1 100644 --- a/sled-agent/src/server.rs +++ b/sled-agent/src/server.rs @@ -73,17 +73,20 @@ impl Server { ..config.dropshot.clone() }; let dropshot_log = log.new(o!("component" => "dropshot (SledAgent)")); - let http_server = - dropshot::ServerBuilder::new(http_api(), sled_agent, dropshot_log) - .config(dropshot_config) - .version_policy(dropshot::VersionPolicy::Dynamic(Box::new( - dropshot::ClientSpecifiesVersionInHeader::new( - omicron_common::api::VERSION_HEADER, - sled_agent_api::VERSION_ADD_SWITCH_ZONE_OPERATOR_POLICY, - ), - ))) - .start() - .map_err(|error| format!("initializing server: {}", error))?; + let http_server = dropshot::ServerBuilder::new( + http_api(), + sled_agent, + dropshot_log, + ) + .config(dropshot_config) + .version_policy(dropshot::VersionPolicy::Dynamic(Box::new( + dropshot::ClientSpecifiesVersionInHeader::new( + omicron_common::api::VERSION_HEADER, + sled_agent_api::VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY, + ), + ))) + .start() + .map_err(|error| format!("initializing server: {}", error))?; Ok(Server { http_server }) } diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 1dfdf191c78..680c3ac40ba 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -2281,6 +2281,7 @@ impl ServiceManager { zone_type: OmicronZoneType::Nexus { internal_address, + lockstep_port, external_tls, external_dns_servers, 
.. @@ -2344,6 +2345,15 @@ impl ServiceManager { default_handler_task_mode: HandlerTaskMode::Detached, log_headers: vec![], }, + dropshot_lockstep: dropshot::ConfigDropshot { + bind_address: SocketAddr::new( + (*internal_address.ip()).into(), + *lockstep_port, + ), + default_request_body_max_bytes: 1048576, + default_handler_task_mode: HandlerTaskMode::Detached, + log_headers: vec![], + }, internal_dns: nexus_config::InternalDns::FromSubnet { subnet: Ipv6Subnet::::new( info.underlay_address, diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index 006d37d8d4a..b0c65ba3e87 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -123,7 +123,7 @@ impl Server { .version_policy(dropshot::VersionPolicy::Dynamic(Box::new( dropshot::ClientSpecifiesVersionInHeader::new( omicron_common::api::VERSION_HEADER, - sled_agent_api::VERSION_ADD_SWITCH_ZONE_OPERATOR_POLICY, + sled_agent_api::VERSION_ADD_NEXUS_LOCKSTEP_PORT_TO_INVENTORY, ), ))) .start() @@ -433,6 +433,7 @@ pub async fn run_standalone_server( SocketAddr::V4(_) => panic!("did not expect v4 address"), SocketAddr::V6(a) => a, }, + lockstep_port: 0, external_ip: from_ipaddr_to_external_floating_ip(ip), nic: nexus_types::inventory::NetworkInterface { id: Uuid::new_v4(), From c9f2bfcf2f730bd51c0cdc5e63e16f1c01b74da6 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Tue, 23 Sep 2025 22:17:35 -0700 Subject: [PATCH 07/18] [2/n] move instance_migrate and APIs used by tests/omdb from nexus-internal to nexus-lockstep (#9037) Part of #8902. Stacked on #8983. --- Cargo.lock | 24 +- clients/nexus-client/src/lib.rs | 37 - clients/nexus-lockstep-client/Cargo.toml | 6 + clients/nexus-lockstep-client/src/lib.rs | 84 +- dev-tools/omdb/Cargo.toml | 16 +- dev-tools/omdb/src/bin/omdb/db/saga.rs | 20 +- dev-tools/omdb/src/bin/omdb/nexus.rs | 123 +- dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs | 14 +- .../bin/omdb/nexus/reconfigurator_config.rs | 6 +- .../omdb/src/bin/omdb/nexus/update_status.rs | 2 +- dev-tools/omdb/src/bin/omdb/support_bundle.rs | 16 +- dev-tools/omdb/tests/test_all_output.rs | 16 +- dev-tools/omdb/tests/usage_errors.out | 37 +- end-to-end-tests/Cargo.toml | 31 +- end-to-end-tests/src/noop_blueprint.rs | 10 +- live-tests/Cargo.toml | 2 +- live-tests/tests/common/mod.rs | 8 +- live-tests/tests/common/reconfigurator.rs | 10 +- live-tests/tests/test_nexus_add_remove.rs | 12 +- live-tests/tests/test_nexus_handoff.rs | 2 +- nexus/Cargo.toml | 18 +- nexus/internal-api/src/lib.rs | 519 +- nexus/lockstep-api/Cargo.toml | 6 + nexus/lockstep-api/src/lib.rs | 552 +- .../cli-integration-tests/Cargo.toml | 6 +- .../tests/integration/blueprint_edit.rs | 16 +- nexus/src/app/quiesce.rs | 33 +- nexus/src/internal_api/http_entrypoints.rs | 976 +- nexus/src/lockstep_api/http_entrypoints.rs | 1006 +- nexus/test-utils/Cargo.toml | 6 +- nexus/test-utils/src/background.rs | 72 +- .../crucible_replacements.rs | 87 +- nexus/tests/integration_tests/demo_saga.rs | 12 +- nexus/tests/integration_tests/disks.rs | 4 +- nexus/tests/integration_tests/instances.rs | 38 +- nexus/tests/integration_tests/metrics.rs | 8 +- nexus/tests/integration_tests/quiesce.rs | 8 +- nexus/tests/integration_tests/rack.rs | 2 +- nexus/tests/integration_tests/snapshots.rs | 4 +- .../integration_tests/support_bundles.rs | 4 +- nexus/tests/integration_tests/updates.rs | 8 +- .../integration_tests/volume_management.rs | 20 +- nexus/tests/integration_tests/webhooks.rs | 92 +- openapi/nexus-internal.json | 10776 +++++----------- openapi/nexus-lockstep.json | 
7166 +++++++++- 45 files changed, 12613 insertions(+), 9302 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c633d3b44d..e0684b0c318 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3144,7 +3144,7 @@ dependencies = [ "internet-checksum", "ispf", "macaddr", - "nexus-client", + "nexus-lockstep-client", "omicron-sled-agent", "omicron-test-utils", "omicron-uuid-kinds", @@ -6675,8 +6675,14 @@ name = "nexus-lockstep-api" version = "0.1.0" dependencies = [ "dropshot", + "http", "nexus-types", + "omicron-common", + "omicron-uuid-kinds", "omicron-workspace-hack", + "schemars", + "serde", + "uuid", ] [[package]] @@ -6685,12 +6691,18 @@ version = "0.1.0" dependencies = [ "chrono", "futures", + "iddqd", + "nexus-types", + "omicron-common", + "omicron-uuid-kinds", "omicron-workspace-hack", + "oxnet", "progenitor 0.10.0", "regress", "reqwest", "schemars", "serde", + "serde_json", "slog", "uuid", ] @@ -6820,8 +6832,8 @@ dependencies = [ "camino", "camino-tempfile", "clap", - "nexus-client", "nexus-db-queries", + "nexus-lockstep-client", "nexus-reconfigurator-preparation", "nexus-test-utils", "nexus-test-utils-macros", @@ -7116,9 +7128,9 @@ dependencies = [ "illumos-utils", "internal-dns-resolver", "internal-dns-types", - "nexus-client", "nexus-config", "nexus-db-queries", + "nexus-lockstep-client", "nexus-sled-agent-shared", "nexus-test-interface", "nexus-types", @@ -7866,11 +7878,11 @@ dependencies = [ "internal-dns-resolver", "internal-dns-types", "live-tests-macros", - "nexus-client", "nexus-config", "nexus-db-model", "nexus-db-queries", "nexus-inventory", + "nexus-lockstep-client", "nexus-reconfigurator-planning", "nexus-reconfigurator-preparation", "nexus-sled-agent-shared", @@ -7971,7 +7983,6 @@ dependencies = [ "mg-admin-client", "nexus-auth", "nexus-background-task-interface", - "nexus-client", "nexus-config", "nexus-db-lookup", "nexus-db-model", @@ -7982,6 +7993,7 @@ dependencies = [ "nexus-internal-api", "nexus-inventory", "nexus-lockstep-api", + "nexus-lockstep-client", "nexus-metrics-producer-gc", "nexus-mgs-updates", "nexus-networking", @@ -8153,7 +8165,6 @@ dependencies = [ "ipnetwork", "itertools 0.14.0", "multimap", - "nexus-client", "nexus-config", "nexus-db-errors", "nexus-db-lookup", @@ -8161,6 +8172,7 @@ dependencies = [ "nexus-db-queries", "nexus-db-schema", "nexus-inventory", + "nexus-lockstep-client", "nexus-reconfigurator-preparation", "nexus-saga-recovery", "nexus-sled-agent-shared", diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index ada5aa718c3..9d596230ac5 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ -5,10 +5,7 @@ //! Interface for making API requests to the Oxide control plane at large //! 
from within the control plane -use iddqd::IdOrdItem; -use iddqd::id_upcast; use std::collections::HashMap; -use uuid::Uuid; progenitor::generate_api!( spec = "../../openapi/nexus-internal.json", @@ -38,8 +35,6 @@ progenitor::generate_api!( BlueprintPhysicalDiskDisposition = nexus_types::deployment::BlueprintPhysicalDiskDisposition, BlueprintZoneImageSource = nexus_types::deployment::BlueprintZoneImageSource, Certificate = omicron_common::api::internal::nexus::Certificate, - ClickhouseMode = nexus_types::deployment::ClickhouseMode, - ClickhousePolicy = nexus_types::deployment::ClickhousePolicy, DatasetKind = omicron_common::api::internal::shared::DatasetKind, DnsConfigParams = nexus_types::internal_api::params::DnsConfigParams, DnsConfigZone = nexus_types::internal_api::params::DnsConfigZone, @@ -47,26 +42,17 @@ progenitor::generate_api!( Generation = omicron_common::api::external::Generation, ImportExportPolicy = omicron_common::api::external::ImportExportPolicy, MacAddr = omicron_common::api::external::MacAddr, - MgsUpdateDriverStatus = nexus_types::internal_api::views::MgsUpdateDriverStatus, Name = omicron_common::api::external::Name, NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, NetworkInterfaceKind = omicron_common::api::internal::shared::NetworkInterfaceKind, NewPasswordHash = omicron_passwords::NewPasswordHash, - OmicronPhysicalDiskConfig = omicron_common::disk::OmicronPhysicalDiskConfig, - OmicronPhysicalDisksConfig = omicron_common::disk::OmicronPhysicalDisksConfig, OximeterReadMode = nexus_types::deployment::OximeterReadMode, - OximeterReadPolicy = nexus_types::deployment::OximeterReadPolicy, PendingMgsUpdate = nexus_types::deployment::PendingMgsUpdate, PlannerConfig = nexus_types::deployment::PlannerConfig, - ReconfiguratorConfig = nexus_types::deployment::ReconfiguratorConfig, - ReconfiguratorConfigParam = nexus_types::deployment::ReconfiguratorConfigParam, - ReconfiguratorConfigView = nexus_types::deployment::ReconfiguratorConfigView, RecoverySiloConfig = nexus_sled_agent_shared::recovery_silo::RecoverySiloConfig, Srv = nexus_types::internal_api::params::Srv, TypedUuidForBlueprintKind = omicron_uuid_kinds::BlueprintUuid, - TypedUuidForCollectionKind = omicron_uuid_kinds::CollectionUuid, TypedUuidForDatasetKind = omicron_uuid_kinds::TypedUuid, - TypedUuidForDemoSagaKind = omicron_uuid_kinds::DemoSagaUuid, TypedUuidForDownstairsKind = omicron_uuid_kinds::TypedUuid, TypedUuidForPhysicalDiskKind = omicron_uuid_kinds::TypedUuid, TypedUuidForPropolisKind = omicron_uuid_kinds::TypedUuid, @@ -76,9 +62,6 @@ progenitor::generate_api!( TypedUuidForUpstairsSessionKind = omicron_uuid_kinds::TypedUuid, TypedUuidForVolumeKind = omicron_uuid_kinds::TypedUuid, TypedUuidForZpoolKind = omicron_uuid_kinds::TypedUuid, - UpdateStatus = nexus_types::internal_api::views::UpdateStatus, - ZoneStatus = nexus_types::internal_api::views::ZoneStatus, - ZoneStatusVersion = nexus_types::internal_api::views::ZoneStatusVersion, ZpoolName = omicron_common::zpool_name::ZpoolName, }, patch = { @@ -88,26 +71,6 @@ progenitor::generate_api!( } ); -impl IdOrdItem for types::PendingSagaInfo { - type Key<'a> = Uuid; - - fn key(&self) -> Self::Key<'_> { - self.saga_id - } - - id_upcast!(); -} - -impl IdOrdItem for types::HeldDbClaimInfo { - type Key<'a> = u64; - - fn key(&self) -> Self::Key<'_> { - self.id - } - - id_upcast!(); -} - impl omicron_common::api::external::ClientError for types::Error { fn message(&self) -> String { self.message.clone() diff --git 
a/clients/nexus-lockstep-client/Cargo.toml b/clients/nexus-lockstep-client/Cargo.toml index 94bf5afffe4..c64acbf1d94 100644 --- a/clients/nexus-lockstep-client/Cargo.toml +++ b/clients/nexus-lockstep-client/Cargo.toml @@ -10,11 +10,17 @@ workspace = true [dependencies] chrono.workspace = true futures.workspace = true +iddqd.workspace = true +nexus-types.workspace = true +omicron-common.workspace = true +omicron-uuid-kinds.workspace = true omicron-workspace-hack.workspace = true +oxnet.workspace = true progenitor.workspace = true regress.workspace = true reqwest.workspace = true schemars.workspace = true serde.workspace = true +serde_json.workspace = true slog.workspace = true uuid.workspace = true diff --git a/clients/nexus-lockstep-client/src/lib.rs b/clients/nexus-lockstep-client/src/lib.rs index c6ce38dc372..eb965b796fe 100644 --- a/clients/nexus-lockstep-client/src/lib.rs +++ b/clients/nexus-lockstep-client/src/lib.rs @@ -6,10 +6,14 @@ //! callers that update in lockstep with Nexus itself (e.g. rack initialization, //! tests and debugging) +use iddqd::IdOrdItem; +use iddqd::id_upcast; +use uuid::Uuid; + progenitor::generate_api!( spec = "../../openapi/nexus-lockstep.json", interface = Positional, - derives = [schemars::JsonSchema], + derives = [schemars::JsonSchema, PartialEq], inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; @@ -21,4 +25,82 @@ progenitor::generate_api!( post_hook = (|log: &slog::Logger, result: &Result<_, _>| { slog::debug!(log, "client response"; "result" => ?result); }), + crates = { + "iddqd" = "*", + "oxnet" = "0.1.0", + }, + replace = { + // It's kind of unfortunate to pull in such a complex and unstable type + // as "blueprint" this way, but we have really useful functionality + // (e.g., diff'ing) that's implemented on our local type. 
+ Blueprint = nexus_types::deployment::Blueprint, + BlueprintPhysicalDiskConfig = nexus_types::deployment::BlueprintPhysicalDiskConfig, + BlueprintPhysicalDiskDisposition = nexus_types::deployment::BlueprintPhysicalDiskDisposition, + BlueprintZoneImageSource = nexus_types::deployment::BlueprintZoneImageSource, + ClickhouseMode = nexus_types::deployment::ClickhouseMode, + ClickhousePolicy = nexus_types::deployment::ClickhousePolicy, + DatasetKind = omicron_common::api::internal::shared::DatasetKind, + Generation = omicron_common::api::external::Generation, + MacAddr = omicron_common::api::external::MacAddr, + MgsUpdateDriverStatus = nexus_types::internal_api::views::MgsUpdateDriverStatus, + Name = omicron_common::api::external::Name, + NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, + NetworkInterfaceKind = omicron_common::api::internal::shared::NetworkInterfaceKind, + OmicronPhysicalDiskConfig = omicron_common::disk::OmicronPhysicalDiskConfig, + OmicronPhysicalDisksConfig = omicron_common::disk::OmicronPhysicalDisksConfig, + OximeterReadMode = nexus_types::deployment::OximeterReadMode, + OximeterReadPolicy = nexus_types::deployment::OximeterReadPolicy, + PendingMgsUpdate = nexus_types::deployment::PendingMgsUpdate, + ReconfiguratorConfig = nexus_types::deployment::ReconfiguratorConfig, + ReconfiguratorConfigParam = nexus_types::deployment::ReconfiguratorConfigParam, + ReconfiguratorConfigView = nexus_types::deployment::ReconfiguratorConfigView, + TypedUuidForBlueprintKind = omicron_uuid_kinds::BlueprintUuid, + TypedUuidForDatasetKind = omicron_uuid_kinds::TypedUuid, + TypedUuidForDemoSagaKind = omicron_uuid_kinds::DemoSagaUuid, + TypedUuidForPhysicalDiskKind = omicron_uuid_kinds::TypedUuid, + TypedUuidForSledKind = omicron_uuid_kinds::TypedUuid, + TypedUuidForZpoolKind = omicron_uuid_kinds::TypedUuid, + UpdateStatus = nexus_types::internal_api::views::UpdateStatus, + ZoneStatus = nexus_types::internal_api::views::ZoneStatus, + ZoneStatusVersion = nexus_types::internal_api::views::ZoneStatusVersion, + ZpoolName = omicron_common::zpool_name::ZpoolName, + }, + patch = { + ByteCount = { derives = [PartialEq, Eq] }, + Baseboard = { derives = [PartialEq, Eq] } + } ); + +impl IdOrdItem for types::PendingSagaInfo { + type Key<'a> = Uuid; + + fn key(&self) -> Self::Key<'_> { + self.saga_id + } + + id_upcast!(); +} + +impl IdOrdItem for types::HeldDbClaimInfo { + type Key<'a> = u64; + + fn key(&self) -> Self::Key<'_> { + self.id + } + + id_upcast!(); +} + +impl From for types::Duration { + fn from(s: std::time::Duration) -> Self { + Self { secs: s.as_secs(), nanos: s.subsec_nanos() } + } +} + +impl From for std::time::Duration { + fn from(s: types::Duration) -> Self { + std::time::Duration::from_nanos( + s.secs * 1000000000 + u64::from(s.nanos), + ) + } +} diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index e4586238011..ca4123438c6 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -37,10 +37,12 @@ http.workspace = true humantime.workspace = true iddqd.workspace = true indent_write.workspace = true +indicatif.workspace = true internal-dns-resolver.workspace = true internal-dns-types.workspace = true +ipnetwork.workspace = true itertools.workspace = true -nexus-client.workspace = true +multimap.workspace = true nexus-config.workspace = true nexus-db-errors.workspace = true nexus-db-lookup.workspace = true @@ -48,15 +50,20 @@ nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-db-schema.workspace = true 
nexus-inventory.workspace = true +nexus-lockstep-client.workspace = true nexus-reconfigurator-preparation.workspace = true nexus-saga-recovery.workspace = true nexus-sled-agent-shared.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true +omicron-workspace-hack.workspace = true +owo-colors.workspace = true oxide-tokio-rt.workspace = true oximeter-client.workspace = true oximeter-db = { workspace = true, default-features = false, features = [ "oxql" ] } +oxnet.workspace = true +petgraph.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" ratatui.workspace = true @@ -79,13 +86,6 @@ unicode-width.workspace = true update-engine.workspace = true url.workspace = true uuid.workspace = true -ipnetwork.workspace = true -omicron-workspace-hack.workspace = true -multimap.workspace = true -indicatif.workspace = true -petgraph.workspace = true -oxnet.workspace = true -owo-colors.workspace = true [dev-dependencies] camino-tempfile.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/db/saga.rs b/dev-tools/omdb/src/bin/omdb/db/saga.rs index 5113a4ad3fa..91ca84ca79e 100644 --- a/dev-tools/omdb/src/bin/omdb/db/saga.rs +++ b/dev-tools/omdb/src/bin/omdb/db/saga.rs @@ -607,7 +607,7 @@ async fn get_saga_sec_status( } }; - let client = nexus_client::Client::new( + let client = nexus_lockstep_client::Client::new( &format!("http://[{addr}]:{port}/"), opctx.log.clone(), ); @@ -618,21 +618,21 @@ async fn get_saga_sec_status( } Err(e) => match e { - nexus_client::Error::InvalidRequest(_) - | nexus_client::Error::InvalidUpgrade(_) - | nexus_client::Error::ErrorResponse(_) - | nexus_client::Error::ResponseBodyError(_) - | nexus_client::Error::InvalidResponsePayload(_, _) - | nexus_client::Error::UnexpectedResponse(_) - | nexus_client::Error::PreHookError(_) - | nexus_client::Error::PostHookError(_) => { + nexus_lockstep_client::Error::InvalidRequest(_) + | nexus_lockstep_client::Error::InvalidUpgrade(_) + | nexus_lockstep_client::Error::ErrorResponse(_) + | nexus_lockstep_client::Error::ResponseBodyError(_) + | nexus_lockstep_client::Error::InvalidResponsePayload(_, _) + | nexus_lockstep_client::Error::UnexpectedResponse(_) + | nexus_lockstep_client::Error::PreHookError(_) + | nexus_lockstep_client::Error::PostHookError(_) => { return SagaSecStatus::SecPingError { sec_id: current_sec, observed_error: e.to_string(), }; } - nexus_client::Error::CommunicationError(_) => { + nexus_lockstep_client::Error::CommunicationError(_) => { // Assume communication error means that it could not be // contacted. 
// diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index fbaedc02d8a..a1e006fd2e0 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -31,19 +31,19 @@ use futures::TryStreamExt; use http::StatusCode; use internal_dns_types::names::ServiceName; use itertools::Itertools; -use nexus_client::types::ActivationReason; -use nexus_client::types::BackgroundTask; -use nexus_client::types::BackgroundTasksActivateRequest; -use nexus_client::types::CurrentStatus; -use nexus_client::types::LastResult; -use nexus_client::types::PhysicalDiskPath; -use nexus_client::types::SagaState; -use nexus_client::types::SledSelector; -use nexus_client::types::UninitializedSledId; use nexus_db_lookup::LookupPath; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_inventory::now_db_precision; +use nexus_lockstep_client::types::ActivationReason; +use nexus_lockstep_client::types::BackgroundTask; +use nexus_lockstep_client::types::BackgroundTasksActivateRequest; +use nexus_lockstep_client::types::CurrentStatus; +use nexus_lockstep_client::types::LastResult; +use nexus_lockstep_client::types::PhysicalDiskPath; +use nexus_lockstep_client::types::SagaState; +use nexus_lockstep_client::types::SledSelector; +use nexus_lockstep_client::types::UninitializedSledId; use nexus_saga_recovery::LastPass; use nexus_types::deployment::Blueprint; use nexus_types::deployment::ClickhouseMode; @@ -113,7 +113,7 @@ use uuid::Uuid; /// Arguments to the "omdb nexus" subcommand #[derive(Debug, Args)] pub struct NexusArgs { - /// URL of the Nexus internal API + /// URL of the Nexus internal lockstep API #[clap( long, env = "OMDB_NEXUS_URL", @@ -268,7 +268,7 @@ impl BlueprintIdOrCurrentTarget { async fn resolve_to_id_via_nexus( &self, - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> anyhow::Result { match self { Self::CurrentTarget => { @@ -284,7 +284,7 @@ impl BlueprintIdOrCurrentTarget { async fn resolve_to_blueprint( &self, - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> anyhow::Result { let id = self.resolve_to_id_via_nexus(client).await?; let response = client @@ -614,13 +614,14 @@ impl NexusArgs { "note: Nexus URL not specified. Will pick one from DNS." 
); let addr = omdb - .dns_lookup_one(log.clone(), ServiceName::Nexus) + .dns_lookup_one(log.clone(), ServiceName::NexusLockstep) .await?; format!("http://{}", addr) } }; eprintln!("note: using Nexus URL {}", &nexus_url); - let client = nexus_client::Client::new(&nexus_url, log.clone()); + let client = + nexus_lockstep_client::Client::new(&nexus_url, log.clone()); match &self.command { NexusCommands::BackgroundTasks(BackgroundTasksArgs { @@ -837,7 +838,7 @@ impl NexusArgs { /// Runs `omdb nexus background-tasks doc` async fn cmd_nexus_background_tasks_doc( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let response = client.bgtask_list().await.context("listing background tasks")?; @@ -863,7 +864,7 @@ async fn cmd_nexus_background_tasks_doc( /// Runs `omdb nexus background-tasks list` async fn cmd_nexus_background_tasks_list( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let response = client.bgtask_list().await.context("listing background tasks")?; @@ -881,7 +882,7 @@ async fn cmd_nexus_background_tasks_list( /// Runs `omdb nexus background-tasks show` async fn cmd_nexus_background_tasks_show( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BackgroundTasksShowArgs, ) -> Result<(), anyhow::Error> { let response = @@ -966,7 +967,7 @@ async fn cmd_nexus_background_tasks_show( /// Runs `omdb nexus background-tasks print-report` async fn cmd_nexus_background_tasks_print_report( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BackgroundTasksPrintReportArgs, color: ColorChoice, ) -> Result<(), anyhow::Error> { @@ -1009,7 +1010,7 @@ async fn cmd_nexus_background_tasks_print_report( /// Runs `omdb nexus background-tasks activate` async fn cmd_nexus_background_tasks_activate( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BackgroundTasksActivateArgs, // This isn't quite "destructive" in the sense that of it being potentially // dangerous, but it does modify the system rather than being a read-only @@ -3240,7 +3241,7 @@ fn reason_code(reason: ActivationReason) -> char { } async fn cmd_nexus_blueprints_list( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] @@ -3312,7 +3313,7 @@ async fn cmd_nexus_blueprints_list( } async fn cmd_nexus_blueprints_show( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BlueprintIdArgs, ) -> Result<(), anyhow::Error> { let blueprint = args.blueprint_id.resolve_to_blueprint(client).await?; @@ -3321,7 +3322,7 @@ async fn cmd_nexus_blueprints_show( } async fn cmd_nexus_blueprints_diff( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BlueprintDiffArgs, ) -> Result<(), anyhow::Error> { let blueprint = args.blueprint1_id.resolve_to_blueprint(client).await?; @@ -3348,7 +3349,7 @@ async fn cmd_nexus_blueprints_diff( } async fn cmd_nexus_blueprints_delete( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BlueprintIdArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { @@ -3363,7 +3364,7 @@ async fn cmd_nexus_blueprints_delete( } async fn cmd_nexus_blueprints_target_show( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let target = client 
.blueprint_target_view() @@ -3376,7 +3377,7 @@ async fn cmd_nexus_blueprints_target_show( } async fn cmd_nexus_blueprints_target_set( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BlueprintTargetSetArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { @@ -3432,10 +3433,12 @@ async fn cmd_nexus_blueprints_target_set( }; client - .blueprint_target_set(&nexus_client::types::BlueprintTargetSet { - target_id: args.blueprint_id, - enabled, - }) + .blueprint_target_set( + &nexus_lockstep_client::types::BlueprintTargetSet { + target_id: args.blueprint_id, + enabled, + }, + ) .await .with_context(|| { format!("setting target to blueprint {}", args.blueprint_id) @@ -3445,7 +3448,7 @@ async fn cmd_nexus_blueprints_target_set( } async fn cmd_nexus_blueprints_target_set_enabled( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &BlueprintIdArgs, enabled: bool, _destruction_token: DestructiveOperationToken, @@ -3455,7 +3458,7 @@ async fn cmd_nexus_blueprints_target_set_enabled( let description = if enabled { "enabled" } else { "disabled" }; client .blueprint_target_set_enabled( - &nexus_client::types::BlueprintTargetSet { + &nexus_lockstep_client::types::BlueprintTargetSet { target_id: blueprint_id, enabled, }, @@ -3469,7 +3472,7 @@ async fn cmd_nexus_blueprints_target_set_enabled( } async fn cmd_nexus_blueprints_regenerate( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { let blueprint = @@ -3479,7 +3482,7 @@ async fn cmd_nexus_blueprints_regenerate( } async fn cmd_nexus_blueprints_import( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, _destruction_token: DestructiveOperationToken, args: &BlueprintImportArgs, ) -> Result<(), anyhow::Error> { @@ -3497,7 +3500,7 @@ async fn cmd_nexus_blueprints_import( } async fn cmd_nexus_clickhouse_policy_get( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let res = client.clickhouse_policy_get().await; @@ -3541,7 +3544,7 @@ async fn cmd_nexus_clickhouse_policy_get( } async fn cmd_nexus_mgs_updates( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let response = client .mgs_updates() @@ -3553,7 +3556,7 @@ async fn cmd_nexus_mgs_updates( } async fn cmd_nexus_clickhouse_policy_set( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &ClickhousePolicySetArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { @@ -3603,7 +3606,7 @@ async fn cmd_nexus_clickhouse_policy_set( } async fn cmd_nexus_oximeter_read_policy_get( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let res = client.oximeter_read_policy_get().await; @@ -3637,7 +3640,7 @@ async fn cmd_nexus_oximeter_read_policy_get( } async fn cmd_nexus_oximeter_read_policy_set( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &OximeterReadPolicySetArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { @@ -3681,7 +3684,7 @@ async fn cmd_nexus_oximeter_read_policy_set( /// Runs `omdb nexus sagas list` async fn cmd_nexus_sagas_list( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { // We don't want users to confuse this with a general way to 
list all sagas. // Such a command would read database state and it would go under "omdb db". @@ -3726,7 +3729,7 @@ async fn cmd_nexus_sagas_list( /// Runs `omdb nexus sagas demo-create` async fn cmd_nexus_sagas_demo_create( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { let demo_saga = @@ -3741,7 +3744,7 @@ async fn cmd_nexus_sagas_demo_create( /// Runs `omdb nexus sagas demo-complete` async fn cmd_nexus_sagas_demo_complete( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &DemoSagaIdArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { @@ -3766,7 +3769,7 @@ async fn cmd_nexus_sagas_demo_complete( /// Runs `omdb nexus sleds list-uninitialized` async fn cmd_nexus_sleds_list_uninitialized( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let response = client .sled_list_uninitialized() @@ -3808,7 +3811,7 @@ async fn cmd_nexus_sleds_list_uninitialized( /// Runs `omdb nexus sleds add` async fn cmd_nexus_sled_add( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SledAddArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { @@ -3827,7 +3830,7 @@ async fn cmd_nexus_sled_add( /// Runs `omdb nexus sleds expunge` async fn cmd_nexus_sled_expunge( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SledExpungeArgs, omdb: &Omdb, log: &slog::Logger, @@ -3849,7 +3852,7 @@ async fn cmd_nexus_sled_expunge( // `omdb nexus sleds expunge`, but borrowing a datastore async fn cmd_nexus_sled_expunge_with_datastore( datastore: &Arc, - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SledExpungeArgs, log: &slog::Logger, _destruction_token: DestructiveOperationToken, @@ -3938,7 +3941,7 @@ async fn cmd_nexus_sled_expunge_with_datastore( /// Runs `omdb nexus sleds expunge-disk` async fn cmd_nexus_sled_expunge_disk( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &DiskExpungeArgs, omdb: &Omdb, log: &slog::Logger, @@ -3959,7 +3962,7 @@ async fn cmd_nexus_sled_expunge_disk( async fn cmd_nexus_sled_expunge_disk_with_datastore( datastore: &Arc, - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &DiskExpungeArgs, log: &slog::Logger, _destruction_token: DestructiveOperationToken, @@ -4072,7 +4075,7 @@ async fn cmd_nexus_sled_expunge_disk_with_datastore( /// Runs `omdb nexus support-bundles list` async fn cmd_nexus_support_bundles_list( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let support_bundle_stream = client.support_bundle_list_stream(None, None); @@ -4111,13 +4114,15 @@ async fn cmd_nexus_support_bundles_list( /// Runs `omdb nexus support-bundles create` async fn cmd_nexus_support_bundles_create( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { let support_bundle_id = client - .support_bundle_create(&nexus_client::types::SupportBundleCreate { - user_comment: None, - }) + .support_bundle_create( + &nexus_lockstep_client::types::SupportBundleCreate { + user_comment: None, + }, + ) .await .context("creating support bundle")? 
.into_inner() @@ -4128,7 +4133,7 @@ async fn cmd_nexus_support_bundles_create( /// Runs `omdb nexus support-bundles delete` async fn cmd_nexus_support_bundles_delete( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SupportBundleDeleteArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { @@ -4158,7 +4163,7 @@ async fn write_stream_to_sink( // // "range" is in bytes, and is inclusive on both sides. async fn support_bundle_download_range( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, id: SupportBundleUuid, range: (u64, u64), ) -> anyhow::Result>> { @@ -4177,7 +4182,7 @@ async fn support_bundle_download_range( // Starts the download at "start" bytes (inclusive) and continues up to "end" // bytes (exclusive). fn support_bundle_download_ranges( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, id: SupportBundleUuid, start: u64, end: u64, @@ -4206,7 +4211,7 @@ fn support_bundle_download_ranges( /// Runs `omdb nexus support-bundles download` async fn cmd_nexus_support_bundles_download( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SupportBundleDownloadArgs, ) -> Result<(), anyhow::Error> { let total_length = client @@ -4244,7 +4249,7 @@ async fn cmd_nexus_support_bundles_download( /// Runs `omdb nexus support-bundles get-index` async fn cmd_nexus_support_bundles_get_index( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SupportBundleIndexArgs, ) -> Result<(), anyhow::Error> { let stream = client @@ -4264,7 +4269,7 @@ async fn cmd_nexus_support_bundles_get_index( /// Runs `omdb nexus support-bundles get-file` async fn cmd_nexus_support_bundles_get_file( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SupportBundleFileArgs, ) -> Result<(), anyhow::Error> { let stream = client @@ -4296,7 +4301,7 @@ async fn cmd_nexus_support_bundles_get_file( /// Runs `omdb nexus support-bundles inspect` async fn cmd_nexus_support_bundles_inspect( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &SupportBundleInspectArgs, ) -> Result<(), anyhow::Error> { let accessor: Box = match (args.id, &args.path) { diff --git a/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs b/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs index 73720272521..7d2a16a5e64 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus/quiesce.rs @@ -11,10 +11,10 @@ use chrono::TimeDelta; use chrono::Utc; use clap::Args; use clap::Subcommand; -use nexus_client::types::PendingRecovery; -use nexus_client::types::QuiesceState; -use nexus_client::types::QuiesceStatus; -use nexus_client::types::SagaQuiesceStatus; +use nexus_lockstep_client::types::PendingRecovery; +use nexus_lockstep_client::types::QuiesceState; +use nexus_lockstep_client::types::QuiesceStatus; +use nexus_lockstep_client::types::SagaQuiesceStatus; use std::time::Duration; #[derive(Debug, Args)] @@ -41,7 +41,7 @@ pub struct QuiesceShowArgs { pub async fn cmd_nexus_quiesce( omdb: &Omdb, - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &QuiesceArgs, ) -> Result<(), anyhow::Error> { match &args.command { @@ -54,7 +54,7 @@ pub async fn cmd_nexus_quiesce( } async fn quiesce_show( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &QuiesceShowArgs, ) -> Result<(), anyhow::Error> { let now = Utc::now(); @@ -237,7 +237,7 @@ async fn quiesce_show( } async 
fn quiesce_start( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, _token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { client.quiesce_start().await.context("quiescing Nexus")?; diff --git a/dev-tools/omdb/src/bin/omdb/nexus/reconfigurator_config.rs b/dev-tools/omdb/src/bin/omdb/nexus/reconfigurator_config.rs index 2b734a81cca..c033e12bbdf 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus/reconfigurator_config.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus/reconfigurator_config.rs @@ -113,7 +113,7 @@ impl FromStr for ReconfiguratorConfigVersionOrCurrent { pub async fn cmd_nexus_reconfigurator_config( omdb: &Omdb, - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &ReconfiguratorConfigArgs, ) -> Result<(), anyhow::Error> { match &args.command { @@ -127,7 +127,7 @@ pub async fn cmd_nexus_reconfigurator_config( } } async fn reconfigurator_config_show( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &ReconfiguratorConfigShowArgs, ) -> Result<(), anyhow::Error> { let res = match args.version { @@ -161,7 +161,7 @@ async fn reconfigurator_config_show( } async fn reconfigurator_config_set( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, args: &ReconfiguratorConfigSetArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { diff --git a/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs b/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs index 8cc6a8252a3..fbd524e48bb 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus/update_status.rs @@ -16,7 +16,7 @@ use tabled::Tabled; /// Runs `omdb nexus update-status` pub async fn cmd_nexus_update_status( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result<(), anyhow::Error> { let status = client .update_status() diff --git a/dev-tools/omdb/src/bin/omdb/support_bundle.rs b/dev-tools/omdb/src/bin/omdb/support_bundle.rs index 05c0e467d62..2d939790455 100644 --- a/dev-tools/omdb/src/bin/omdb/support_bundle.rs +++ b/dev-tools/omdb/src/bin/omdb/support_bundle.rs @@ -14,8 +14,8 @@ use camino::Utf8PathBuf; use futures::Stream; use futures::StreamExt; use futures::TryStreamExt; -use nexus_client::types::SupportBundleInfo; -use nexus_client::types::SupportBundleState; +use nexus_lockstep_client::types::SupportBundleInfo; +use nexus_lockstep_client::types::SupportBundleState; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SupportBundleUuid; use std::io; @@ -31,7 +31,7 @@ use tokio::io::AsyncRead; use tokio::io::ReadBuf; pub struct StreamedFile<'a> { - client: &'a nexus_client::Client, + client: &'a nexus_lockstep_client::Client, id: SupportBundleUuid, path: Utf8PathBuf, stream: Option> + Send>>>, @@ -40,7 +40,7 @@ pub struct StreamedFile<'a> { impl<'a> StreamedFile<'a> { fn new( - client: &'a nexus_client::Client, + client: &'a nexus_lockstep_client::Client, id: SupportBundleUuid, path: Utf8PathBuf, ) -> Self { @@ -111,13 +111,13 @@ impl AsyncRead for StreamedFile<'_> { /// Access to a support bundle from the internal API pub struct InternalApiAccess<'a> { - client: &'a nexus_client::Client, + client: &'a nexus_lockstep_client::Client, id: SupportBundleUuid, } impl<'a> InternalApiAccess<'a> { pub fn new( - client: &'a nexus_client::Client, + client: &'a nexus_lockstep_client::Client, id: SupportBundleUuid, ) -> Self { Self { client, id } @@ -170,7 +170,7 @@ impl<'c> SupportBundleAccessor for InternalApiAccess<'c> { } 
async fn wait_for_bundle_to_be_collected( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, id: SupportBundleUuid, ) -> Result { let mut printed_wait_msg = false; @@ -207,7 +207,7 @@ async fn wait_for_bundle_to_be_collected( /// /// If a bundle is being collected, waits for it. pub async fn access_bundle_from_id( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, id: Option, ) -> Result, anyhow::Error> { let id = match id { diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 99d46b935c9..3aaffbf98ec 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -136,8 +136,8 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { let cmd_path = path_to_executable(CMD_OMDB); let postgres_url = cptestctx.database.listen_url(); - let nexus_internal_url = - format!("http://{}/", cptestctx.internal_client.bind_address); + let nexus_lockstep_url = + format!("http://{}/", cptestctx.lockstep_client.bind_address); let mgs_url = cptestctx .gateway .get(&SwitchLocation::Switch0) @@ -156,7 +156,7 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { // Get the CockroachDB metadata from the blueprint so we can redact it let initial_blueprint: Blueprint = dropshot::test_util::read_json( &mut cptestctx - .internal_client + .lockstep_client .make_request_no_body( Method::GET, &format!("/deployment/blueprints/all/{initial_blueprint_id}"), @@ -306,7 +306,7 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { for args in invocations { println!("running commands with args: {:?}", args); let p = postgres_url.to_string(); - let u = nexus_internal_url.clone(); + let u = nexus_lockstep_url.clone(); let g = mgs_url.clone(); let ox = ox_url.clone(); let ch = ch_url.clone(); @@ -396,8 +396,8 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { let cmd_path = path_to_executable(CMD_OMDB); let postgres_url = cptestctx.database.listen_url().to_string(); - let nexus_internal_url = - format!("http://{}", cptestctx.internal_client.bind_address); + let nexus_lockstep_url = + format!("http://{}", cptestctx.lockstep_client.bind_address); let ox_url = format!("http://{}/", cptestctx.oximeter.server_address()); let ox_test_producer = cptestctx.producer.address().ip(); let ch_url = format!("http://{}/", cptestctx.clickhouse.http_address()); @@ -425,7 +425,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { let args = &[ "nexus", "--nexus-internal-url", - &nexus_internal_url.clone(), + &nexus_lockstep_url.clone(), "background-tasks", "doc", ]; @@ -434,7 +434,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { // Case 2: specified in multiple places (command-line argument wins) let args = &["nexus", "--nexus-internal-url", "junk", "background-tasks", "doc"]; - let n = nexus_internal_url.clone(); + let n = nexus_lockstep_url.clone(); do_run( &mut output, move |exec| exec.env("OMDB_NEXUS_URL", &n), diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 869df0c426f..704e2cd2591 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -866,9 +866,10 @@ Options: -h, --help Print help Connection Options: - --nexus-internal-url URL of the Nexus internal API [env: - OMDB_NEXUS_URL=] - --dns-server [env: OMDB_DNS_SERVER=] + --nexus-internal-url + URL of the Nexus internal lockstep API [env: 
OMDB_NEXUS_URL=] + --dns-server + [env: OMDB_DNS_SERVER=] Safety Options: -w, --destructive Allow potentially-destructive subcommands @@ -897,9 +898,10 @@ Options: -h, --help Print help Connection Options: - --nexus-internal-url URL of the Nexus internal API [env: - OMDB_NEXUS_URL=] - --dns-server [env: OMDB_DNS_SERVER=] + --nexus-internal-url + URL of the Nexus internal lockstep API [env: OMDB_NEXUS_URL=] + --dns-server + [env: OMDB_DNS_SERVER=] Safety Options: -w, --destructive Allow potentially-destructive subcommands @@ -942,7 +944,7 @@ Options: Connection Options: --nexus-internal-url - URL of the Nexus internal API + URL of the Nexus internal lockstep API [env: OMDB_NEXUS_URL=] @@ -981,9 +983,10 @@ Options: -h, --help Print help Connection Options: - --nexus-internal-url URL of the Nexus internal API [env: - OMDB_NEXUS_URL=] - --dns-server [env: OMDB_DNS_SERVER=] + --nexus-internal-url + URL of the Nexus internal lockstep API [env: OMDB_NEXUS_URL=] + --dns-server + [env: OMDB_DNS_SERVER=] Safety Options: -w, --destructive Allow potentially-destructive subcommands @@ -1010,9 +1013,10 @@ Options: -h, --help Print help Connection Options: - --nexus-internal-url URL of the Nexus internal API [env: - OMDB_NEXUS_URL=] - --dns-server [env: OMDB_DNS_SERVER=] + --nexus-internal-url + URL of the Nexus internal lockstep API [env: OMDB_NEXUS_URL=] + --dns-server + [env: OMDB_DNS_SERVER=] Safety Options: -w, --destructive Allow potentially-destructive subcommands @@ -1049,9 +1053,10 @@ Options: -h, --help Print help Connection Options: - --nexus-internal-url URL of the Nexus internal API [env: - OMDB_NEXUS_URL=] - --dns-server [env: OMDB_DNS_SERVER=] + --nexus-internal-url + URL of the Nexus internal lockstep API [env: OMDB_NEXUS_URL=] + --dns-server + [env: OMDB_DNS_SERVER=] Safety Options: -w, --destructive Allow potentially-destructive subcommands diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml index c3010bb5d21..1854484a857 100644 --- a/end-to-end-tests/Cargo.toml +++ b/end-to-end-tests/Cargo.toml @@ -8,19 +8,31 @@ license = "MPL-2.0" workspace = true [dependencies] +anstyle.workspace = true anyhow = { workspace = true, features = ["backtrace"] } async-trait.workspace = true base64.workspace = true bytes.workspace = true chrono.workspace = true -http.workspace = true +clap.workspace = true +colored.workspace = true +# On Git commit for trust-dns -> hickory switch. +# Switch back to released versions of dhcproto on next release. 
+dhcproto = { git = "https://github.com/bluecatengineering/dhcproto.git", rev = "120da6fcd8a7be84d417d372634ead84ce07e6da" } futures.workspace = true +hickory-resolver.workspace = true +http.workspace = true +humantime.workspace = true internal-dns-resolver.workspace = true internal-dns-types.workspace = true -nexus-client.workspace = true +internet-checksum.workspace = true +ispf.workspace = true +macaddr.workspace = true +nexus-lockstep-client.workspace = true omicron-sled-agent.workspace = true omicron-test-utils.workspace = true omicron-uuid-kinds.workspace = true +omicron-workspace-hack.workspace = true oxide-client.workspace = true oxide-tokio-rt.workspace = true rand.workspace = true @@ -32,21 +44,8 @@ serde_json.workspace = true sled-agent-types.workspace = true slog.workspace = true slog-error-chain.workspace = true +socket2.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } toml.workspace = true -hickory-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack.workspace = true -ispf.workspace = true -internet-checksum.workspace = true -humantime.workspace = true -socket2.workspace = true -colored.workspace = true -anstyle.workspace = true -clap.workspace = true -macaddr.workspace = true - -# On Git commit for trust-dns -> hickory switch. -# Switch back to released versions of dhcproto on next release. -dhcproto = { git = "https://github.com/bluecatengineering/dhcproto.git", rev = "120da6fcd8a7be84d417d372634ead84ce07e6da" } diff --git a/end-to-end-tests/src/noop_blueprint.rs b/end-to-end-tests/src/noop_blueprint.rs index c64981fe54d..3f24d3ee287 100644 --- a/end-to-end-tests/src/noop_blueprint.rs +++ b/end-to-end-tests/src/noop_blueprint.rs @@ -5,7 +5,7 @@ use internal_dns_resolver::Resolver; use internal_dns_types::names::ServiceName; -use nexus_client::Client as NexusClient; +use nexus_lockstep_client::Client as NexusClient; use omicron_test_utils::dev::poll::{CondCheckError, wait_for_condition}; use omicron_test_utils::dev::test_setup_log; use omicron_uuid_kinds::GenericUuid; @@ -95,7 +95,10 @@ enum MakeNexusError { #[error("looking up Nexus IP in internal DNS")] Resolve(#[from] internal_dns_resolver::ResolveError), #[error("making request to Nexus")] - Request(#[from] nexus_client::Error), + Request( + #[from] + nexus_lockstep_client::Error, + ), } /// Make one attempt to look up the IP of Nexus in internal DNS and make an HTTP @@ -109,7 +112,8 @@ async fn make_nexus_client( log: &slog::Logger, ) -> Result { debug!(log, "doing DNS lookup for Nexus"); - let nexus_ip = resolver.lookup_socket_v6(ServiceName::Nexus).await?; + let nexus_ip = + resolver.lookup_socket_v6(ServiceName::NexusLockstep).await?; let url = format!("http://{}", nexus_ip); debug!(log, "found Nexus IP"; "nexus_ip" => %nexus_ip, "url" => &url); diff --git a/live-tests/Cargo.toml b/live-tests/Cargo.toml index 7961e3b87f2..41c896bd0a9 100644 --- a/live-tests/Cargo.toml +++ b/live-tests/Cargo.toml @@ -20,11 +20,11 @@ futures.workspace = true internal-dns-resolver.workspace = true internal-dns-types.workspace = true live-tests-macros.workspace = true -nexus-client.workspace = true nexus-config.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-inventory.workspace = true +nexus-lockstep-client.workspace = true nexus-reconfigurator-planning.workspace = true nexus-reconfigurator-preparation.workspace = true nexus-sled-agent-shared.workspace = true diff --git a/live-tests/tests/common/mod.rs 
b/live-tests/tests/common/mod.rs index 40ddbb14fe8..6bbbaf9b564 100644 --- a/live-tests/tests/common/mod.rs +++ b/live-tests/tests/common/mod.rs @@ -73,20 +73,20 @@ impl LiveTestContext { pub fn specific_internal_nexus_client( &self, sockaddr: SocketAddrV6, - ) -> nexus_client::Client { + ) -> nexus_lockstep_client::Client { let url = format!("http://{}", sockaddr); let log = self.logctx.log.new(o!("nexus_internal_url" => url.clone())); - nexus_client::Client::new(&url, log) + nexus_lockstep_client::Client::new(&url, log) } /// Returns a list of clients for the internal APIs for all Nexus instances /// found in DNS pub async fn all_internal_nexus_clients( &self, - ) -> Result, anyhow::Error> { + ) -> Result, anyhow::Error> { Ok(self .resolver - .lookup_all_socket_v6(ServiceName::Nexus) + .lookup_all_socket_v6(ServiceName::NexusLockstep) .await .context("looking up Nexus in internal DNS")? .into_iter() diff --git a/live-tests/tests/common/reconfigurator.rs b/live-tests/tests/common/reconfigurator.rs index 57247600053..1f1aa5e212d 100644 --- a/live-tests/tests/common/reconfigurator.rs +++ b/live-tests/tests/common/reconfigurator.rs @@ -5,9 +5,11 @@ //! Helpers common to Reconfigurator tests use anyhow::{Context, anyhow, bail, ensure}; -use nexus_client::types::{BackgroundTasksActivateRequest, BlueprintTargetSet}; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; +use nexus_lockstep_client::types::{ + BackgroundTasksActivateRequest, BlueprintTargetSet, +}; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::planner::PlannerRng; use nexus_types::deployment::{Blueprint, BlueprintSource, PlanningInput}; @@ -25,7 +27,7 @@ use std::time::Duration; /// don't want to proceed with tests. pub async fn blueprint_load_target_enabled( log: &slog::Logger, - nexus: &nexus_client::Client, + nexus: &nexus_lockstep_client::Client, ) -> Result { // Fetch the current target configuration. info!(log, "editing current target blueprint"); @@ -80,7 +82,7 @@ pub async fn blueprint_edit_current_target( log: &slog::Logger, planning_input: &PlanningInput, collection: &Collection, - nexus: &nexus_client::Client, + nexus: &nexus_lockstep_client::Client, edit_fn: &dyn Fn(&mut BlueprintBuilder) -> Result<(), anyhow::Error>, ) -> Result<(Blueprint, Blueprint), anyhow::Error> { // Fetch the current target configuration. 
@@ -183,7 +185,7 @@ pub async fn blueprint_wait_sled_configs_propagated( opctx: &OpContext, datastore: &DataStore, blueprint: &Blueprint, - nexus: &nexus_client::Client, + nexus: &nexus_lockstep_client::Client, timeout: Duration, ) -> Result { wait_for_condition( diff --git a/live-tests/tests/test_nexus_add_remove.rs b/live-tests/tests/test_nexus_add_remove.rs index 2cf7acfb8d1..405c06ecc92 100644 --- a/live-tests/tests/test_nexus_add_remove.rs +++ b/live-tests/tests/test_nexus_add_remove.rs @@ -10,11 +10,11 @@ use common::LiveTestContext; use common::reconfigurator::blueprint_edit_current_target; use futures::TryStreamExt; use live_tests_macros::live_test; -use nexus_client::types::BlueprintTargetSet; -use nexus_client::types::QuiesceState; -use nexus_client::types::Saga; -use nexus_client::types::SagaState; use nexus_inventory::CollectionBuilder; +use nexus_lockstep_client::types::BlueprintTargetSet; +use nexus_lockstep_client::types::QuiesceState; +use nexus_lockstep_client::types::Saga; +use nexus_lockstep_client::types::SagaState; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::planner::Planner; use nexus_reconfigurator_planning::planner::PlannerRng; @@ -210,7 +210,7 @@ async fn test_nexus_add_remove(lc: &LiveTestContext) { wait_for_condition( || async { match new_zone_client.saga_list(None, None, None).await { - Err(nexus_client::Error::CommunicationError(error)) => { + Err(nexus_lockstep_client::Error::CommunicationError(error)) => { info!(log, "expunged Nexus no longer reachable"; "error" => slog_error_chain::InlineErrorChain::new(&error), ); @@ -374,7 +374,7 @@ async fn test_nexus_add_remove(lc: &LiveTestContext) { } async fn list_sagas( - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, ) -> Result, anyhow::Error> { client .saga_list_stream(None, None) diff --git a/live-tests/tests/test_nexus_handoff.rs b/live-tests/tests/test_nexus_handoff.rs index f27ec3f242e..4d87a6d4a28 100644 --- a/live-tests/tests/test_nexus_handoff.rs +++ b/live-tests/tests/test_nexus_handoff.rs @@ -10,8 +10,8 @@ use anyhow::Context; use common::LiveTestContext; use common::reconfigurator::blueprint_edit_current_target; use live_tests_macros::live_test; -use nexus_client::types::QuiesceState; use nexus_db_model::DbMetadataNexusState; +use nexus_lockstep_client::types::QuiesceState; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_preparation::PlanningInputFromDb; use nexus_types::deployment::BlueprintZoneDisposition; diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index eb509439521..92846019f62 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -151,14 +151,17 @@ criterion.workspace = true diesel.workspace = true dns-server.workspace = true expectorate.workspace = true -hyper-rustls.workspace = true gateway-messages.workspace = true gateway-test-utils.workspace = true +hickory-resolver.workspace = true +httpmock.workspace = true +httptest.workspace = true hubtools.workspace = true +hyper-rustls.workspace = true nexus-db-queries = { workspace = true, features = ["testing"] } -nexus-client.workspace = true -nexus-test-utils-macros.workspace = true +nexus-lockstep-client.workspace = true nexus-test-utils.workspace = true +nexus-test-utils-macros.workspace = true omicron-sled-agent.workspace = true omicron-test-utils.workspace = true openapi-lint.workspace = true @@ -170,18 +173,15 @@ petgraph.workspace = true pretty_assertions.workspace = true rcgen.workspace = true 
regex.workspace = true +rustls.workspace = true similar-asserts.workspace = true sp-sim.workspace = true -rustls.workspace = true +strum.workspace = true subprocess.workspace = true term.workspace = true -hickory-resolver.workspace = true tufaceous.workspace = true -tufaceous-lib.workspace = true -httptest.workspace = true -httpmock.workspace = true -strum.workspace = true tufaceous-artifact.workspace = true +tufaceous-lib.workspace = true [[bench]] name = "setup_benchmark" diff --git a/nexus/internal-api/src/lib.rs b/nexus/internal-api/src/lib.rs index eeafb17dae9..6481db1c903 100644 --- a/nexus/internal-api/src/lib.rs +++ b/nexus/internal-api/src/lib.rs @@ -2,42 +2,28 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeSet; use dropshot::{ - Body, Header, HttpError, HttpResponseCreated, HttpResponseDeleted, - HttpResponseOk, HttpResponseUpdatedNoContent, Path, Query, RequestContext, - ResultsPage, TypedBody, + HttpError, HttpResponseCreated, HttpResponseOk, + HttpResponseUpdatedNoContent, Path, Query, RequestContext, ResultsPage, + TypedBody, }; -use http::Response; use nexus_types::{ - deployment::{ - Blueprint, BlueprintMetadata, BlueprintTarget, BlueprintTargetSet, - ClickhousePolicy, OximeterReadPolicy, ReconfiguratorConfigParam, - ReconfiguratorConfigView, - }, external_api::{ - headers::RangeRequest, - params::{self, PhysicalDiskPath, SledSelector, UninitializedSledId}, - shared::{self, ProbeInfo, UninitializedSled}, - views::{Ping, PingStatus, SledPolicy}, + shared::ProbeInfo, + views::{Ping, PingStatus}, }, internal_api::{ params::{ - InstanceMigrateRequest, OximeterInfo, RackInitializationRequest, - SledAgentInfo, SwitchPutRequest, SwitchPutResponse, - }, - views::{ - BackgroundTask, DemoSaga, MgsUpdateDriverStatus, NatEntryView, - QuiesceStatus, Saga, UpdateStatus, + OximeterInfo, RackInitializationRequest, SledAgentInfo, + SwitchPutRequest, SwitchPutResponse, }, + views::NatEntryView, }, }; use omicron_common::api::{ - external::{ - Instance, - http_pagination::{PaginatedById, PaginatedByTimeAndId}, - }, + external::http_pagination::PaginatedById, internal::nexus::{ DiskRuntimeState, DownstairsClientStopRequest, DownstairsClientStopped, ProducerEndpoint, ProducerRegistrationResponse, RepairFinishInfo, @@ -138,16 +124,6 @@ pub trait NexusInternalApi { new_runtime_state: TypedBody, ) -> Result; - #[endpoint { - method = POST, - path = "/instances/{instance_id}/migrate", - }] - async fn instance_migrate( - rqctx: RequestContext, - path_params: Path, - migrate_params: TypedBody, - ) -> Result, HttpError>; - /// Report updated state for a disk. #[endpoint { method = PUT, @@ -280,81 +256,11 @@ pub trait NexusInternalApi { downstairs_client_stopped: TypedBody, ) -> Result; - // Debug interfaces for sagas - - /// List sagas - #[endpoint { - method = GET, - path = "/sagas", - }] - async fn saga_list( - rqctx: RequestContext, - query_params: Query, - ) -> Result>, HttpError>; - - /// Fetch a saga - #[endpoint { - method = GET, - path = "/sagas/{saga_id}", - }] - async fn saga_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError>; - - /// Kick off an instance of the "demo" saga + /// **Do not use in new code!** /// - /// This saga is used for demo and testing. The saga just waits until you - /// complete using the `saga_demo_complete` API. 
- #[endpoint { - method = POST, - path = "/demo-saga", - }] - async fn saga_demo_create( - rqctx: RequestContext, - ) -> Result, HttpError>; - - /// Complete a waiting demo saga - /// - /// Note that the id used here is not the same as the id of the saga. It's - /// the one returned by the `saga_demo_create` API. - #[endpoint { - method = POST, - path = "/demo-saga/{demo_saga_id}/complete", - }] - async fn saga_demo_complete( - rqctx: RequestContext, - path_params: Path, - ) -> Result; - - // Debug interfaces for background Tasks - - /// List background tasks - /// - /// This is a list of discrete background activities that Nexus carries out. - /// This is exposed for support and debugging. - #[endpoint { - method = GET, - path = "/bgtasks", - }] - async fn bgtask_list( - rqctx: RequestContext, - ) -> Result>, HttpError>; - - /// Fetch status of one background task - /// - /// This is exposed for support and debugging. - #[endpoint { - method = GET, - path = "/bgtasks/view/{bgtask_name}", - }] - async fn bgtask_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError>; - - /// Activates one or more background tasks, causing them to be run immediately - /// if idle, or scheduled to run again as soon as possible if already running. + /// Callers to this API should either be capable of using the nexus-lockstep + /// API or should be rewritten to use a doorbell API to activate a specific + /// task. Task names are internal to Nexus. #[endpoint { method = POST, path = "/bgtasks/activate", @@ -364,17 +270,6 @@ pub trait NexusInternalApi { body: TypedBody, ) -> Result; - // Debug interfaces for ongoing MGS updates - - /// Fetch information about ongoing MGS updates - #[endpoint { - method = GET, - path = "/mgs-updates", - }] - async fn mgs_updates( - rqctx: RequestContext, - ) -> Result, HttpError>; - // NAT RPW internal APIs /// Fetch NAT ChangeSet @@ -394,301 +289,6 @@ pub trait NexusInternalApi { query_params: Query, ) -> Result>, HttpError>; - // APIs for managing blueprints - // - // These are not (yet) intended for use by any other programs. Eventually, we - // will want this functionality part of the public API. But we don't want to - // commit to any of this yet. These properly belong in an RFD 399-style - // "Service and Support API". Absent that, we stick them here. 
- - /// Lists blueprints - #[endpoint { - method = GET, - path = "/deployment/blueprints/all", - }] - async fn blueprint_list( - rqctx: RequestContext, - query_params: Query, - ) -> Result>, HttpError>; - - /// Fetches one blueprint - #[endpoint { - method = GET, - path = "/deployment/blueprints/all/{blueprint_id}", - }] - async fn blueprint_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError>; - - /// Deletes one blueprint - #[endpoint { - method = DELETE, - path = "/deployment/blueprints/all/{blueprint_id}", - }] - async fn blueprint_delete( - rqctx: RequestContext, - path_params: Path, - ) -> Result; - - // Managing the current target blueprint - - /// Fetches the current target blueprint, if any - #[endpoint { - method = GET, - path = "/deployment/blueprints/target", - }] - async fn blueprint_target_view( - rqctx: RequestContext, - ) -> Result, HttpError>; - - /// Make the specified blueprint the new target - #[endpoint { - method = POST, - path = "/deployment/blueprints/target", - }] - async fn blueprint_target_set( - rqctx: RequestContext, - target: TypedBody, - ) -> Result, HttpError>; - - /// Set the `enabled` field of the current target blueprint - #[endpoint { - method = PUT, - path = "/deployment/blueprints/target/enabled", - }] - async fn blueprint_target_set_enabled( - rqctx: RequestContext, - target: TypedBody, - ) -> Result, HttpError>; - - // Generating blueprints - - /// Generates a new blueprint for the current system, re-evaluating anything - /// that's changed since the last one was generated - #[endpoint { - method = POST, - path = "/deployment/blueprints/regenerate", - }] - async fn blueprint_regenerate( - rqctx: RequestContext, - ) -> Result, HttpError>; - - /// Imports a client-provided blueprint - /// - /// This is intended for development and support, not end users or operators. - #[endpoint { - method = POST, - path = "/deployment/blueprints/import", - }] - async fn blueprint_import( - rqctx: RequestContext, - blueprint: TypedBody, - ) -> Result; - - /// Get the current reconfigurator configuration - #[endpoint { - method = GET, - path = "/deployment/reconfigurator-config" - }] - async fn reconfigurator_config_show_current( - rqctx: RequestContext, - ) -> Result, HttpError>; - - /// Get the reconfigurator config at `version` if it exists - #[endpoint { - method = GET, - path = "/deployment/reconfigurator-config/{version}" - }] - async fn reconfigurator_config_show( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError>; - - /// Update the reconfigurator config at the latest versions - #[endpoint { - method = POST, - path = "/deployment/reconfigurator-config" - }] - async fn reconfigurator_config_set( - rqctx: RequestContext, - switches: TypedBody, - ) -> Result; - - /// Show deployed versions of artifacts - #[endpoint { - method = GET, - path = "/deployment/update-status" - }] - async fn update_status( - rqctx: RequestContext, - ) -> Result, HttpError>; - - /// List uninitialized sleds - #[endpoint { - method = GET, - path = "/sleds/uninitialized", - }] - async fn sled_list_uninitialized( - rqctx: RequestContext, - ) -> Result>, HttpError>; - - /// Add sled to initialized rack - // - // TODO: In the future this should really be a PUT request, once we resolve - // https://github.com/oxidecomputer/omicron/issues/4494. It should also - // explicitly be tied to a rack via a `rack_id` path param. For now we assume - // we are only operating on single rack systems. 
- #[endpoint { - method = POST, - path = "/sleds/add", - }] - async fn sled_add( - rqctx: RequestContext, - sled: TypedBody, - ) -> Result, HttpError>; - - /// Mark a sled as expunged - /// - /// This is an irreversible process! It should only be called after - /// sufficient warning to the operator. - /// - /// This is idempotent, and it returns the old policy of the sled. - #[endpoint { - method = POST, - path = "/sleds/expunge", - }] - async fn sled_expunge( - rqctx: RequestContext, - sled: TypedBody, - ) -> Result, HttpError>; - - /// Mark a physical disk as expunged - /// - /// This is an irreversible process! It should only be called after - /// sufficient warning to the operator. - /// - /// This is idempotent. - #[endpoint { - method = POST, - path = "/physical-disk/expunge", - }] - async fn physical_disk_expunge( - rqctx: RequestContext, - disk: TypedBody, - ) -> Result; - - // Support bundles (experimental) - - /// List all support bundles - #[endpoint { - method = GET, - path = "/experimental/v1/system/support-bundles", - }] - async fn support_bundle_list( - rqctx: RequestContext, - query_params: Query, - ) -> Result>, HttpError>; - - /// View a support bundle - #[endpoint { - method = GET, - path = "/experimental/v1/system/support-bundles/{bundle_id}", - }] - async fn support_bundle_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError>; - - /// Download the index of a support bundle - #[endpoint { - method = GET, - path = "/experimental/v1/system/support-bundles/{bundle_id}/index", - }] - async fn support_bundle_index( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError>; - - /// Download the contents of a support bundle - #[endpoint { - method = GET, - path = "/experimental/v1/system/support-bundles/{bundle_id}/download", - }] - async fn support_bundle_download( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError>; - - /// Download a file within a support bundle - #[endpoint { - method = GET, - path = "/experimental/v1/system/support-bundles/{bundle_id}/download/{file}", - }] - async fn support_bundle_download_file( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError>; - - /// Download the metadata of a support bundle - #[endpoint { - method = HEAD, - path = "/experimental/v1/system/support-bundles/{bundle_id}/download", - }] - async fn support_bundle_head( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError>; - - /// Download the metadata of a file within the support bundle - #[endpoint { - method = HEAD, - path = "/experimental/v1/system/support-bundles/{bundle_id}/download/{file}", - }] - async fn support_bundle_head_file( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError>; - - /// Create a new support bundle - #[endpoint { - method = POST, - path = "/experimental/v1/system/support-bundles", - }] - async fn support_bundle_create( - rqctx: RequestContext, - body: TypedBody, - ) -> Result, HttpError>; - - /// Delete an existing support bundle - /// - /// May also be used to cancel a support bundle which is currently being - /// collected, or to remove metadata for a support bundle that has failed. 
- #[endpoint { - method = DELETE, - path = "/experimental/v1/system/support-bundles/{bundle_id}", - }] - async fn support_bundle_delete( - rqctx: RequestContext, - path_params: Path, - ) -> Result; - - /// Update a support bundle - #[endpoint { - method = PUT, - path = "/experimental/v1/system/support-bundles/{bundle_id}", - }] - async fn support_bundle_update( - rqctx: RequestContext, - path_params: Path, - body: TypedBody, - ) -> Result, HttpError>; - /// Get all the probes associated with a given sled. #[endpoint { method = GET, @@ -699,66 +299,6 @@ pub trait NexusInternalApi { path_params: Path, query_params: Query, ) -> Result>, HttpError>; - - /// Get the current clickhouse policy - #[endpoint { - method = GET, - path = "/clickhouse/policy" - }] - async fn clickhouse_policy_get( - rqctx: RequestContext, - ) -> Result, HttpError>; - - /// Set the new clickhouse policy - #[endpoint { - method = POST, - path = "/clickhouse/policy" - }] - async fn clickhouse_policy_set( - rqctx: RequestContext, - policy: TypedBody, - ) -> Result; - - /// Get the current oximeter read policy - #[endpoint { - method = GET, - path = "/oximeter/read-policy" - }] - async fn oximeter_read_policy_get( - rqctx: RequestContext, - ) -> Result, HttpError>; - - /// Set the new oximeter read policy - #[endpoint { - method = POST, - path = "/oximeter/read-policy" - }] - async fn oximeter_read_policy_set( - rqctx: RequestContext, - policy: TypedBody, - ) -> Result; - - /// Begin quiescing this Nexus instance - /// - /// This causes no new sagas to be started and eventually causes no database - /// connections to become available. This is a one-way trip. There's no - /// unquiescing Nexus. - #[endpoint { - method = POST, - path = "/quiesce" - }] - async fn quiesce_start( - rqctx: RequestContext, - ) -> Result; - - /// Check whether Nexus is running normally, quiescing, or fully quiesced. - #[endpoint { - method = GET, - path = "/quiesce" - }] - async fn quiesce_get( - rqctx: RequestContext, - ) -> Result, HttpError>; } /// Path parameters for Sled Agent requests (internal API) @@ -791,13 +331,6 @@ pub struct SwitchPathParam { pub switch_id: Uuid, } -/// Path parameters for Instance requests (internal API) -#[derive(Deserialize, JsonSchema)] -pub struct InstancePathParam { - #[schemars(with = "Uuid")] - pub instance_id: InstanceUuid, -} - /// Path parameters for VMM requests (internal API) #[derive(Deserialize, JsonSchema)] pub struct VmmPathParam { @@ -830,25 +363,6 @@ pub struct UpstairsDownstairsPathParam { pub downstairs_id: TypedUuid, } -/// Path parameters for Saga requests -#[derive(Deserialize, JsonSchema)] -pub struct SagaPathParam { - #[serde(rename = "saga_id")] - pub saga_id: Uuid, -} - -/// Path parameters for DemoSaga requests -#[derive(Deserialize, JsonSchema)] -pub struct DemoSagaPathParam { - pub demo_saga_id: DemoSagaUuid, -} - -/// Path parameters for Background Task requests -#[derive(Deserialize, JsonSchema)] -pub struct BackgroundTaskPathParam { - pub bgtask_name: String, -} - /// Query parameters for Background Task activation requests. 
#[derive(Deserialize, JsonSchema)] pub struct BackgroundTasksActivateRequest { @@ -880,8 +394,3 @@ pub struct ProbePathParam { #[schemars(with = "Uuid")] pub sled: SledUuid, } - -#[derive(Deserialize, JsonSchema)] -pub struct VersionPathParam { - pub version: u32, -} diff --git a/nexus/lockstep-api/Cargo.toml b/nexus/lockstep-api/Cargo.toml index 06ce4b373bf..1ec8dffb5b8 100644 --- a/nexus/lockstep-api/Cargo.toml +++ b/nexus/lockstep-api/Cargo.toml @@ -9,5 +9,11 @@ workspace = true [dependencies] dropshot.workspace = true +http.workspace = true nexus-types.workspace = true +omicron-common.workspace = true +omicron-uuid-kinds.workspace = true omicron-workspace-hack.workspace = true +schemars.workspace = true +serde.workspace = true +uuid.workspace = true diff --git a/nexus/lockstep-api/src/lib.rs b/nexus/lockstep-api/src/lib.rs index 5d41943a1c4..43b54ac9260 100644 --- a/nexus/lockstep-api/src/lib.rs +++ b/nexus/lockstep-api/src/lib.rs @@ -2,8 +2,55 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use dropshot::{HttpError, HttpResponseOk, RequestContext}; -use nexus_types::external_api::views::{Ping, PingStatus}; +use std::collections::BTreeMap; +use std::collections::BTreeSet; + +use dropshot::Body; +use dropshot::Header; +use dropshot::HttpError; +use dropshot::HttpResponseCreated; +use dropshot::HttpResponseDeleted; +use dropshot::HttpResponseOk; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::Path; +use dropshot::Query; +use dropshot::RequestContext; +use dropshot::ResultsPage; +use dropshot::TypedBody; +use http::Response; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintMetadata; +use nexus_types::deployment::BlueprintTarget; +use nexus_types::deployment::BlueprintTargetSet; +use nexus_types::deployment::ClickhousePolicy; +use nexus_types::deployment::OximeterReadPolicy; +use nexus_types::deployment::ReconfiguratorConfigParam; +use nexus_types::deployment::ReconfiguratorConfigView; +use nexus_types::external_api::headers::RangeRequest; +use nexus_types::external_api::params; +use nexus_types::external_api::params::PhysicalDiskPath; +use nexus_types::external_api::params::SledSelector; +use nexus_types::external_api::params::UninitializedSledId; +use nexus_types::external_api::shared; +use nexus_types::external_api::shared::UninitializedSled; +use nexus_types::external_api::views::Ping; +use nexus_types::external_api::views::PingStatus; +use nexus_types::external_api::views::SledPolicy; +use nexus_types::internal_api::params::InstanceMigrateRequest; +use nexus_types::internal_api::views::BackgroundTask; +use nexus_types::internal_api::views::DemoSaga; +use nexus_types::internal_api::views::MgsUpdateDriverStatus; +use nexus_types::internal_api::views::QuiesceStatus; +use nexus_types::internal_api::views::Saga; +use nexus_types::internal_api::views::UpdateStatus; +use omicron_common::api::external::Instance; +use omicron_common::api::external::http_pagination::PaginatedById; +use omicron_common::api::external::http_pagination::PaginatedByTimeAndId; +use omicron_uuid_kinds::*; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use uuid::Uuid; #[dropshot::api_description] pub trait NexusLockstepApi { @@ -21,4 +68,505 @@ pub trait NexusLockstepApi { ) -> Result, HttpError> { Ok(HttpResponseOk(Ping { status: PingStatus::Ok })) } + + #[endpoint { + method = POST, + path = "/instances/{instance_id}/migrate", + }] + async fn instance_migrate( 
+ rqctx: RequestContext, + path_params: Path, + migrate_params: TypedBody, + ) -> Result, HttpError>; + + // Debug interfaces for sagas + + /// List sagas + #[endpoint { + method = GET, + path = "/sagas", + }] + async fn saga_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch a saga + #[endpoint { + method = GET, + path = "/sagas/{saga_id}", + }] + async fn saga_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Kick off an instance of the "demo" saga + /// + /// This saga is used for demo and testing. The saga just waits until you + /// complete using the `saga_demo_complete` API. + #[endpoint { + method = POST, + path = "/demo-saga", + }] + async fn saga_demo_create( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Complete a waiting demo saga + /// + /// Note that the id used here is not the same as the id of the saga. It's + /// the one returned by the `saga_demo_create` API. + #[endpoint { + method = POST, + path = "/demo-saga/{demo_saga_id}/complete", + }] + async fn saga_demo_complete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + // Debug interfaces for background Tasks + + /// List background tasks + /// + /// This is a list of discrete background activities that Nexus carries out. + /// This is exposed for support and debugging. + #[endpoint { + method = GET, + path = "/bgtasks", + }] + async fn bgtask_list( + rqctx: RequestContext, + ) -> Result>, HttpError>; + + /// Fetch status of one background task + /// + /// This is exposed for support and debugging. + #[endpoint { + method = GET, + path = "/bgtasks/view/{bgtask_name}", + }] + async fn bgtask_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Activates one or more background tasks, causing them to be run immediately + /// if idle, or scheduled to run again as soon as possible if already running. + #[endpoint { + method = POST, + path = "/bgtasks/activate", + }] + async fn bgtask_activate( + rqctx: RequestContext, + body: TypedBody, + ) -> Result; + + // Debug interfaces for ongoing MGS updates + + /// Fetch information about ongoing MGS updates + #[endpoint { + method = GET, + path = "/mgs-updates", + }] + async fn mgs_updates( + rqctx: RequestContext, + ) -> Result, HttpError>; + + // APIs for managing blueprints + // + // These are not (yet) intended for use by any other programs. Eventually, we + // will want this functionality part of the public API. But we don't want to + // commit to any of this yet. These properly belong in an RFD 399-style + // "Service and Support API". Absent that, we stick them here. 
+ + /// Lists blueprints + #[endpoint { + method = GET, + path = "/deployment/blueprints/all", + }] + async fn blueprint_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetches one blueprint + #[endpoint { + method = GET, + path = "/deployment/blueprints/all/{blueprint_id}", + }] + async fn blueprint_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Deletes one blueprint + #[endpoint { + method = DELETE, + path = "/deployment/blueprints/all/{blueprint_id}", + }] + async fn blueprint_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + // Managing the current target blueprint + + /// Fetches the current target blueprint, if any + #[endpoint { + method = GET, + path = "/deployment/blueprints/target", + }] + async fn blueprint_target_view( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Make the specified blueprint the new target + #[endpoint { + method = POST, + path = "/deployment/blueprints/target", + }] + async fn blueprint_target_set( + rqctx: RequestContext, + target: TypedBody, + ) -> Result, HttpError>; + + /// Set the `enabled` field of the current target blueprint + #[endpoint { + method = PUT, + path = "/deployment/blueprints/target/enabled", + }] + async fn blueprint_target_set_enabled( + rqctx: RequestContext, + target: TypedBody, + ) -> Result, HttpError>; + + // Generating blueprints + + /// Generates a new blueprint for the current system, re-evaluating anything + /// that's changed since the last one was generated + #[endpoint { + method = POST, + path = "/deployment/blueprints/regenerate", + }] + async fn blueprint_regenerate( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Imports a client-provided blueprint + /// + /// This is intended for development and support, not end users or operators. + #[endpoint { + method = POST, + path = "/deployment/blueprints/import", + }] + async fn blueprint_import( + rqctx: RequestContext, + blueprint: TypedBody, + ) -> Result; + + /// Get the current reconfigurator configuration + #[endpoint { + method = GET, + path = "/deployment/reconfigurator-config" + }] + async fn reconfigurator_config_show_current( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Get the reconfigurator config at `version` if it exists + #[endpoint { + method = GET, + path = "/deployment/reconfigurator-config/{version}" + }] + async fn reconfigurator_config_show( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Update the reconfigurator config at the latest versions + #[endpoint { + method = POST, + path = "/deployment/reconfigurator-config" + }] + async fn reconfigurator_config_set( + rqctx: RequestContext, + switches: TypedBody, + ) -> Result; + + /// Show deployed versions of artifacts + #[endpoint { + method = GET, + path = "/deployment/update-status" + }] + async fn update_status( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// List uninitialized sleds + #[endpoint { + method = GET, + path = "/sleds/uninitialized", + }] + async fn sled_list_uninitialized( + rqctx: RequestContext, + ) -> Result>, HttpError>; + + /// Add sled to initialized rack + // + // TODO: In the future this should really be a PUT request, once we resolve + // https://github.com/oxidecomputer/omicron/issues/4494. It should also + // explicitly be tied to a rack via a `rack_id` path param. For now we assume + // we are only operating on single rack systems. 
+ #[endpoint { + method = POST, + path = "/sleds/add", + }] + async fn sled_add( + rqctx: RequestContext, + sled: TypedBody, + ) -> Result, HttpError>; + + /// Mark a sled as expunged + /// + /// This is an irreversible process! It should only be called after + /// sufficient warning to the operator. + /// + /// This is idempotent, and it returns the old policy of the sled. + #[endpoint { + method = POST, + path = "/sleds/expunge", + }] + async fn sled_expunge( + rqctx: RequestContext, + sled: TypedBody, + ) -> Result, HttpError>; + + /// Mark a physical disk as expunged + /// + /// This is an irreversible process! It should only be called after + /// sufficient warning to the operator. + /// + /// This is idempotent. + #[endpoint { + method = POST, + path = "/physical-disk/expunge", + }] + async fn physical_disk_expunge( + rqctx: RequestContext, + disk: TypedBody, + ) -> Result; + + // Support bundles (experimental) + + /// List all support bundles + #[endpoint { + method = GET, + path = "/experimental/v1/system/support-bundles", + }] + async fn support_bundle_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// View a support bundle + #[endpoint { + method = GET, + path = "/experimental/v1/system/support-bundles/{bundle_id}", + }] + async fn support_bundle_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Download the index of a support bundle + #[endpoint { + method = GET, + path = "/experimental/v1/system/support-bundles/{bundle_id}/index", + }] + async fn support_bundle_index( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError>; + + /// Download the contents of a support bundle + #[endpoint { + method = GET, + path = "/experimental/v1/system/support-bundles/{bundle_id}/download", + }] + async fn support_bundle_download( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError>; + + /// Download a file within a support bundle + #[endpoint { + method = GET, + path = "/experimental/v1/system/support-bundles/{bundle_id}/download/{file}", + }] + async fn support_bundle_download_file( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError>; + + /// Download the metadata of a support bundle + #[endpoint { + method = HEAD, + path = "/experimental/v1/system/support-bundles/{bundle_id}/download", + }] + async fn support_bundle_head( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError>; + + /// Download the metadata of a file within the support bundle + #[endpoint { + method = HEAD, + path = "/experimental/v1/system/support-bundles/{bundle_id}/download/{file}", + }] + async fn support_bundle_head_file( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError>; + + /// Create a new support bundle + #[endpoint { + method = POST, + path = "/experimental/v1/system/support-bundles", + }] + async fn support_bundle_create( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError>; + + /// Delete an existing support bundle + /// + /// May also be used to cancel a support bundle which is currently being + /// collected, or to remove metadata for a support bundle that has failed. 
+ #[endpoint { + method = DELETE, + path = "/experimental/v1/system/support-bundles/{bundle_id}", + }] + async fn support_bundle_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// Update a support bundle + #[endpoint { + method = PUT, + path = "/experimental/v1/system/support-bundles/{bundle_id}", + }] + async fn support_bundle_update( + rqctx: RequestContext, + path_params: Path, + body: TypedBody, + ) -> Result, HttpError>; + + /// Get the current clickhouse policy + #[endpoint { + method = GET, + path = "/clickhouse/policy" + }] + async fn clickhouse_policy_get( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Set the new clickhouse policy + #[endpoint { + method = POST, + path = "/clickhouse/policy" + }] + async fn clickhouse_policy_set( + rqctx: RequestContext, + policy: TypedBody, + ) -> Result; + + /// Get the current oximeter read policy + #[endpoint { + method = GET, + path = "/oximeter/read-policy" + }] + async fn oximeter_read_policy_get( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Set the new oximeter read policy + #[endpoint { + method = POST, + path = "/oximeter/read-policy" + }] + async fn oximeter_read_policy_set( + rqctx: RequestContext, + policy: TypedBody, + ) -> Result; + + /// Begin quiescing this Nexus instance + /// + /// This causes no new sagas to be started and eventually causes no database + /// connections to become available. This is a one-way trip. There's no + /// unquiescing Nexus. + #[endpoint { + method = POST, + path = "/quiesce" + }] + async fn quiesce_start( + rqctx: RequestContext, + ) -> Result; + + /// Check whether Nexus is running normally, quiescing, or fully quiesced. + #[endpoint { + method = GET, + path = "/quiesce" + }] + async fn quiesce_get( + rqctx: RequestContext, + ) -> Result, HttpError>; +} + +/// Path parameters for Instance requests (internal API) +#[derive(Deserialize, JsonSchema)] +pub struct InstancePathParam { + pub instance_id: InstanceUuid, +} + +/// Path parameters for Saga requests +#[derive(Deserialize, JsonSchema)] +pub struct SagaPathParam { + #[serde(rename = "saga_id")] + pub saga_id: Uuid, +} + +/// Path parameters for DemoSaga requests +#[derive(Deserialize, JsonSchema)] +pub struct DemoSagaPathParam { + pub demo_saga_id: DemoSagaUuid, +} + +/// Path parameters for Background Task requests +#[derive(Deserialize, JsonSchema)] +pub struct BackgroundTaskPathParam { + pub bgtask_name: String, +} + +/// Query parameters for Background Task activation requests. 
+#[derive(Deserialize, JsonSchema)] +pub struct BackgroundTasksActivateRequest { + pub bgtask_names: BTreeSet, +} + +#[derive(Clone, Debug, Serialize, JsonSchema)] +pub struct SledId { + pub id: SledUuid, +} + +#[derive(Deserialize, JsonSchema)] +pub struct VersionPathParam { + pub version: u32, } diff --git a/nexus/reconfigurator/cli-integration-tests/Cargo.toml b/nexus/reconfigurator/cli-integration-tests/Cargo.toml index 3ee7bfba036..30c824a22f5 100644 --- a/nexus/reconfigurator/cli-integration-tests/Cargo.toml +++ b/nexus/reconfigurator/cli-integration-tests/Cargo.toml @@ -20,13 +20,13 @@ pq-sys = "*" reconfigurator-cli.workspace = true [dev-dependencies] -camino-tempfile.workspace = true camino.workspace = true -nexus-client.workspace = true +camino-tempfile.workspace = true nexus-db-queries.workspace = true +nexus-lockstep-client.workspace = true nexus-reconfigurator-preparation.workspace = true -nexus-test-utils-macros.workspace = true nexus-test-utils.workspace = true +nexus-test-utils-macros.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-nexus.workspace = true diff --git a/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs b/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs index ae0b2c7e311..0ac5b596670 100644 --- a/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs +++ b/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs @@ -194,10 +194,10 @@ async fn test_blueprint_edit(cptestctx: &ControlPlaneTestContext) { assert_eq!(new_blueprint, new_blueprint2); // Import the new blueprint. - let nexus_internal_url = - format!("http://{}/", cptestctx.internal_client.bind_address); + let nexus_lockstep_url = + format!("http://{}/", cptestctx.lockstep_client.bind_address); let nexus_client = - nexus_client::Client::new(&nexus_internal_url, log.clone()); + nexus_lockstep_client::Client::new(&nexus_lockstep_url, log.clone()); nexus_client .blueprint_import(&new_blueprint) .await @@ -212,10 +212,12 @@ async fn test_blueprint_edit(cptestctx: &ControlPlaneTestContext) { // Set the blueprint as the (disabled) target. 
nexus_client - .blueprint_target_set(&nexus_client::types::BlueprintTargetSet { - target_id: new_blueprint.id, - enabled: false, - }) + .blueprint_target_set( + &nexus_lockstep_client::types::BlueprintTargetSet { + target_id: new_blueprint.id, + enabled: false, + }, + ) .await .context("setting target blueprint") .unwrap(); diff --git a/nexus/src/app/quiesce.rs b/nexus/src/app/quiesce.rs index b5c97354f1c..853425e3909 100644 --- a/nexus/src/app/quiesce.rs +++ b/nexus/src/app/quiesce.rs @@ -517,9 +517,9 @@ mod test { use diesel::ExpressionMethods; use diesel::QueryDsl; use http::StatusCode; - use nexus_client::types::QuiesceState; - use nexus_client::types::QuiesceStatus; use nexus_db_model::DbMetadataNexusState; + use nexus_lockstep_client::types::QuiesceState; + use nexus_lockstep_client::types::QuiesceStatus; use nexus_test_interface::NexusServer; use nexus_test_utils::db::TestDatabase; use nexus_test_utils_macros::nexus_test; @@ -540,11 +540,12 @@ mod test { type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; - type NexusClientError = nexus_client::Error; + type NexusClientError = + nexus_lockstep_client::Error; async fn wait_quiesce( log: &Logger, - client: &nexus_client::Client, + client: &nexus_lockstep_client::Client, timeout: Duration, ) -> QuiesceStatus { wait_for_condition( @@ -558,9 +559,7 @@ mod test { if matches!(rv.state, QuiesceState::Quiesced { .. }) { Ok(rv) } else { - Err(CondCheckError::< - nexus_client::Error, - >::NotYet) + Err(CondCheckError::::NotYet) } }, &Duration::from_millis(50), @@ -628,12 +627,14 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_quiesce_easy(cptestctx: &ControlPlaneTestContext) { let log = &cptestctx.logctx.log; - let nexus_internal_url = format!( + let nexus_lockstep_url = format!( "http://{}", - cptestctx.server.get_http_server_internal_address().await + cptestctx.server.get_http_server_lockstep_address().await + ); + let nexus_client = nexus_lockstep_client::Client::new( + &nexus_lockstep_url, + log.clone(), ); - let nexus_client = - nexus_client::Client::new(&nexus_internal_url, log.clone()); // We need to enable blueprint execution in order to complete a saga // assignment pass, which is required for quiescing to work. @@ -657,12 +658,14 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_quiesce_full(cptestctx: &ControlPlaneTestContext) { let log = &cptestctx.logctx.log; - let nexus_internal_url = format!( + let nexus_lockstep_url = format!( "http://{}", - cptestctx.server.get_http_server_internal_address().await + cptestctx.server.get_http_server_lockstep_address().await + ); + let nexus_client = nexus_lockstep_client::Client::new( + &nexus_lockstep_url, + log.clone(), ); - let nexus_client = - nexus_client::Client::new(&nexus_internal_url, log.clone()); // We need to enable blueprint execution in order to complete a saga // assignment pass, which is required for quiescing to work. diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index f44c20e0f70..89eb026ffef 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -5,15 +5,10 @@ //! 
Handler functions (entrypoints) for HTTP APIs internal to the control plane use super::params::{OximeterInfo, RackInitializationRequest}; -use crate::app::support_bundles::SupportBundleQueryType; use crate::context::ApiContext; -use crate::external_api::shared; use dropshot::ApiDescription; -use dropshot::Body; -use dropshot::Header; use dropshot::HttpError; use dropshot::HttpResponseCreated; -use dropshot::HttpResponseDeleted; use dropshot::HttpResponseOk; use dropshot::HttpResponseUpdatedNoContent; use dropshot::Path; @@ -21,43 +16,14 @@ use dropshot::Query; use dropshot::RequestContext; use dropshot::ResultsPage; use dropshot::TypedBody; -use http::Response; use nexus_internal_api::*; -use nexus_types::deployment::Blueprint; -use nexus_types::deployment::BlueprintMetadata; -use nexus_types::deployment::BlueprintTarget; -use nexus_types::deployment::BlueprintTargetSet; -use nexus_types::deployment::ClickhousePolicy; -use nexus_types::deployment::OximeterReadPolicy; -use nexus_types::deployment::ReconfiguratorConfigParam; -use nexus_types::deployment::ReconfiguratorConfigView; -use nexus_types::external_api::headers::RangeRequest; -use nexus_types::external_api::params::PhysicalDiskPath; -use nexus_types::external_api::params::SledSelector; -use nexus_types::external_api::params::SupportBundleFilePath; -use nexus_types::external_api::params::SupportBundlePath; -use nexus_types::external_api::params::SupportBundleUpdate; -use nexus_types::external_api::params::UninitializedSledId; use nexus_types::external_api::shared::ProbeInfo; -use nexus_types::external_api::shared::UninitializedSled; -use nexus_types::external_api::views::SledPolicy; -use nexus_types::internal_api::params::InstanceMigrateRequest; use nexus_types::internal_api::params::SledAgentInfo; use nexus_types::internal_api::params::SwitchPutRequest; use nexus_types::internal_api::params::SwitchPutResponse; -use nexus_types::internal_api::views::BackgroundTask; -use nexus_types::internal_api::views::DemoSaga; -use nexus_types::internal_api::views::MgsUpdateDriverStatus; use nexus_types::internal_api::views::NatEntryView; -use nexus_types::internal_api::views::QuiesceStatus; -use nexus_types::internal_api::views::Saga; -use nexus_types::internal_api::views::UpdateStatus; -use nexus_types::internal_api::views::to_list; -use omicron_common::api::external::Instance; use omicron_common::api::external::http_pagination::PaginatedById; -use omicron_common::api::external::http_pagination::PaginatedByTimeAndId; use omicron_common::api::external::http_pagination::ScanById; -use omicron_common::api::external::http_pagination::ScanByTimeAndId; use omicron_common::api::external::http_pagination::ScanParams; use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::internal::nexus::DiskRuntimeState; @@ -69,9 +35,6 @@ use omicron_common::api::internal::nexus::RepairFinishInfo; use omicron_common::api::internal::nexus::RepairProgress; use omicron_common::api::internal::nexus::RepairStartInfo; use omicron_common::api::internal::nexus::SledVmmState; -use omicron_uuid_kinds::*; -use range_requests::PotentialRange; -use std::collections::BTreeMap; type NexusApiDescription = ApiDescription; @@ -210,29 +173,6 @@ impl NexusInternalApi for NexusInternalApiImpl { .await } - async fn instance_migrate( - rqctx: RequestContext, - path_params: Path, - migrate_params: TypedBody, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let 
migrate = migrate_params.into_inner(); - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let instance = nexus - .instance_migrate(&opctx, path.instance_id, migrate) - .await?; - Ok(HttpResponseOk(instance.into())) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - async fn cpapi_disks_put( rqctx: RequestContext, path_params: Path, @@ -512,125 +452,6 @@ impl NexusInternalApi for NexusInternalApiImpl { .await } - // Debug interfaces for Sagas - - async fn saga_list( - rqctx: RequestContext, - query_params: Query, - ) -> Result>, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let saga_stream = nexus.sagas_list(&opctx, &pagparams).await?; - let view_list = to_list(saga_stream).await; - Ok(HttpResponseOk(ScanById::results_page( - &query, - view_list, - &|_, saga: &Saga| saga.id, - )?)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn saga_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let saga = nexus.saga_get(&opctx, path.saga_id).await?; - Ok(HttpResponseOk(saga)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn saga_demo_create( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let demo_saga = nexus.saga_demo_create().await?; - Ok(HttpResponseOk(demo_saga)) - }; - - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn saga_demo_complete( - rqctx: RequestContext, - path_params: Path, - ) -> Result { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - nexus.saga_demo_complete(path.demo_saga_id)?; - Ok(HttpResponseUpdatedNoContent()) - }; - - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - // Debug interfaces for Background Tasks - - async fn bgtask_list( - rqctx: RequestContext, - ) -> Result>, HttpError> - { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let bgtask_list = nexus.bgtasks_list(&opctx).await?; - Ok(HttpResponseOk(bgtask_list)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn bgtask_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let bgtask = nexus.bgtask_status(&opctx, &path.bgtask_name).await?; - Ok(HttpResponseOk(bgtask)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - async fn bgtask_activate( rqctx: RequestContext, body: TypedBody, @@ -650,24 +471,6 
@@ impl NexusInternalApi for NexusInternalApiImpl { .await } - // Debug interfaces for MGS updates - - async fn mgs_updates( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - Ok(HttpResponseOk(nexus.mgs_updates(&opctx).await?)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - // NAT RPW internal APIs async fn ipv4_nat_changeset( @@ -695,660 +498,11 @@ impl NexusInternalApi for NexusInternalApiImpl { .await } - // APIs for managing blueprints - async fn blueprint_list( + async fn probes_get( rqctx: RequestContext, + path_params: Path, query_params: Query, - ) -> Result>, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let query = query_params.into_inner(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let pagparams = data_page_params_for(&rqctx, &query)?; - let blueprints = nexus.blueprint_list(&opctx, &pagparams).await?; - Ok(HttpResponseOk(ScanById::results_page( - &query, - blueprints, - &|_, blueprint: &BlueprintMetadata| { - blueprint.id.into_untyped_uuid() - }, - )?)) - }; - - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - /// Fetches one blueprint - async fn blueprint_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let blueprint = - nexus.blueprint_view(&opctx, path.blueprint_id).await?; - Ok(HttpResponseOk(blueprint)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - /// Deletes one blueprint - async fn blueprint_delete( - rqctx: RequestContext, - path_params: Path, - ) -> Result { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - nexus.blueprint_delete(&opctx, path.blueprint_id).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn blueprint_target_view( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let target = nexus.blueprint_target_view(&opctx).await?; - Ok(HttpResponseOk(target)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn blueprint_target_set( - rqctx: RequestContext, - target: TypedBody, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let target = target.into_inner(); - let target = nexus.blueprint_target_set(&opctx, target).await?; - Ok(HttpResponseOk(target)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn blueprint_target_set_enabled( - rqctx: RequestContext, - target: TypedBody, - ) -> Result, HttpError> { - let apictx = 
&rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let target = target.into_inner(); - let target = - nexus.blueprint_target_set_enabled(&opctx, target).await?; - Ok(HttpResponseOk(target)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn blueprint_regenerate( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let result = nexus.blueprint_create_regenerate(&opctx).await?; - Ok(HttpResponseOk(result)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn blueprint_import( - rqctx: RequestContext, - blueprint: TypedBody, - ) -> Result { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let blueprint = blueprint.into_inner(); - nexus.blueprint_import(&opctx, blueprint).await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn reconfigurator_config_show_current( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let datastore = &apictx.nexus.datastore(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - match datastore.reconfigurator_config_get_latest(&opctx).await? { - Some(switches) => Ok(HttpResponseOk(switches)), - None => Err(HttpError::for_not_found( - None, - "No config in database".into(), - )), - } - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn reconfigurator_config_show( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let datastore = &apictx.nexus.datastore(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let version = path_params.into_inner().version; - match datastore.reconfigurator_config_get(&opctx, version).await? 
{ - Some(switches) => Ok(HttpResponseOk(switches)), - None => Err(HttpError::for_not_found( - None, - format!("No config in database at version {version}"), - )), - } - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn reconfigurator_config_set( - rqctx: RequestContext, - switches: TypedBody, - ) -> Result { - let apictx = &rqctx.context().context; - let handler = async { - let datastore = &apictx.nexus.datastore(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - datastore - .reconfigurator_config_insert_latest_version( - &opctx, - switches.into_inner(), - ) - .await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn update_status( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let nexus = &apictx.nexus; - let result = nexus.update_status(&opctx).await?; - Ok(HttpResponseOk(result)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn sled_list_uninitialized( - rqctx: RequestContext, - ) -> Result>, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let sleds = nexus.sled_list_uninitialized(&opctx).await?; - Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None })) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn sled_add( - rqctx: RequestContext, - sled: TypedBody, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let id = nexus.sled_add(&opctx, sled.into_inner()).await?; - Ok(HttpResponseCreated(SledId { id })) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn sled_expunge( - rqctx: RequestContext, - sled: TypedBody, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let previous_policy = - nexus.sled_expunge(&opctx, sled.into_inner().sled).await?; - Ok(HttpResponseOk(previous_policy)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn physical_disk_expunge( - rqctx: RequestContext, - disk: TypedBody, - ) -> Result { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - nexus.physical_disk_expunge(&opctx, disk.into_inner()).await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_list( - rqctx: RequestContext, - query_params: Query, - ) -> Result>, HttpError> - { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - let bundles = nexus - 
.support_bundle_list(&opctx, &pagparams) - .await? - .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanByTimeAndId::results_page( - &query, - bundles, - &|_, bundle: &shared::SupportBundleInfo| { - (bundle.time_created, bundle.id.into_untyped_uuid()) - }, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_view( - rqctx: RequestContext, - path_params: Path, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - let bundle = nexus - .support_bundle_view( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle_id), - ) - .await?; - - Ok(HttpResponseOk(bundle.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_index( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - let head = false; - let range = headers - .into_inner() - .range - .map(|r| PotentialRange::new(r.as_bytes())); - - let body = nexus - .support_bundle_download( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle_id), - SupportBundleQueryType::Index, - head, - range, - ) - .await?; - Ok(body) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_download( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - let head = false; - let range = headers - .into_inner() - .range - .map(|r| PotentialRange::new(r.as_bytes())); - - let body = nexus - .support_bundle_download( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle_id), - SupportBundleQueryType::Whole, - head, - range, - ) - .await?; - Ok(body) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_download_file( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let head = false; - let range = headers - .into_inner() - .range - .map(|r| PotentialRange::new(r.as_bytes())); - - let body = nexus - .support_bundle_download( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle.bundle_id), - SupportBundleQueryType::Path { file_path: path.file }, - head, - range, - ) - .await?; - Ok(body) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_head( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = - 
crate::context::op_context_for_internal_api(&rqctx).await; - let head = true; - let range = headers - .into_inner() - .range - .map(|r| PotentialRange::new(r.as_bytes())); - - let body = nexus - .support_bundle_download( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle_id), - SupportBundleQueryType::Whole, - head, - range, - ) - .await?; - Ok(body) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_head_file( - rqctx: RequestContext, - headers: Header, - path_params: Path, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let head = true; - let range = headers - .into_inner() - .range - .map(|r| PotentialRange::new(r.as_bytes())); - - let body = nexus - .support_bundle_download( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle.bundle_id), - SupportBundleQueryType::Path { file_path: path.file }, - head, - range, - ) - .await?; - Ok(body) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_create( - rqctx: RequestContext, - body: TypedBody, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let create_params = body.into_inner(); - - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - let bundle = nexus - .support_bundle_create( - &opctx, - "Created by internal API", - create_params.user_comment, - ) - .await?; - Ok(HttpResponseCreated(bundle.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_delete( - rqctx: RequestContext, - path_params: Path, - ) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - nexus - .support_bundle_delete( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle_id), - ) - .await?; - - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn support_bundle_update( - rqctx: RequestContext, - path_params: Path, - body: TypedBody, - ) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let update = body.into_inner(); - - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - - let bundle = nexus - .support_bundle_update_user_comment( - &opctx, - SupportBundleUuid::from_untyped_uuid(path.bundle_id), - update.user_comment, - ) - .await?; - - Ok(HttpResponseOk(bundle.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn probes_get( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - ) -> Result>, HttpError> { + ) -> Result>, HttpError> { let apictx = &rqctx.context().context; let handler = async { let query = query_params.into_inner(); @@ -1368,128 +522,4 @@ impl NexusInternalApi for NexusInternalApiImpl { .instrument_dropshot_handler(&rqctx, handler) .await } - - async fn clickhouse_policy_get( - rqctx: RequestContext, - ) -> Result, 
HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - match nexus.datastore().clickhouse_policy_get_latest(&opctx).await? - { - Some(policy) => Ok(HttpResponseOk(policy)), - None => Err(HttpError::for_not_found( - None, - "No clickhouse policy in database".into(), - )), - } - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn clickhouse_policy_set( - rqctx: RequestContext, - policy: TypedBody, - ) -> Result { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - nexus - .datastore() - .clickhouse_policy_insert_latest_version( - &opctx, - &policy.into_inner(), - ) - .await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn oximeter_read_policy_get( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let handler = async { - let nexus = &apictx.nexus; - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - let policy = nexus - .datastore() - .oximeter_read_policy_get_latest(&opctx) - .await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn oximeter_read_policy_set( - rqctx: RequestContext, - policy: TypedBody, - ) -> Result { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - nexus - .datastore() - .oximeter_read_policy_insert_latest_version( - &opctx, - &policy.into_inner(), - ) - .await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn quiesce_start( - rqctx: RequestContext, - ) -> Result { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - nexus.quiesce_start(&opctx).await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } - - async fn quiesce_get( - rqctx: RequestContext, - ) -> Result, HttpError> { - let apictx = &rqctx.context().context; - let nexus = &apictx.nexus; - let handler = async { - let opctx = - crate::context::op_context_for_internal_api(&rqctx).await; - Ok(HttpResponseOk(nexus.quiesce_state(&opctx).await?)) - }; - apictx - .internal_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await - } } diff --git a/nexus/src/lockstep_api/http_entrypoints.rs b/nexus/src/lockstep_api/http_entrypoints.rs index 5d771aa5912..463b0883deb 100644 --- a/nexus/src/lockstep_api/http_entrypoints.rs +++ b/nexus/src/lockstep_api/http_entrypoints.rs @@ -5,9 +5,61 @@ //! Handler functions (entrypoints) for HTTP APIs internal to the control plane //! 
whose callers are updated in lockstep with Nexus -use crate::context::ApiContext; +use std::collections::BTreeMap; + use dropshot::ApiDescription; +use dropshot::Body; +use dropshot::Header; +use dropshot::HttpError; +use dropshot::HttpResponseCreated; +use dropshot::HttpResponseDeleted; +use dropshot::HttpResponseOk; +use dropshot::HttpResponseUpdatedNoContent; +use dropshot::Path; +use dropshot::Query; +use dropshot::RequestContext; +use dropshot::ResultsPage; +use dropshot::TypedBody; +use http::Response; use nexus_lockstep_api::*; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintMetadata; +use nexus_types::deployment::BlueprintTarget; +use nexus_types::deployment::BlueprintTargetSet; +use nexus_types::deployment::ClickhousePolicy; +use nexus_types::deployment::OximeterReadPolicy; +use nexus_types::deployment::ReconfiguratorConfigParam; +use nexus_types::deployment::ReconfiguratorConfigView; +use nexus_types::external_api::headers::RangeRequest; +use nexus_types::external_api::params::PhysicalDiskPath; +use nexus_types::external_api::params::SledSelector; +use nexus_types::external_api::params::SupportBundleFilePath; +use nexus_types::external_api::params::SupportBundlePath; +use nexus_types::external_api::params::SupportBundleUpdate; +use nexus_types::external_api::params::UninitializedSledId; +use nexus_types::external_api::shared; +use nexus_types::external_api::shared::UninitializedSled; +use nexus_types::external_api::views::SledPolicy; +use nexus_types::internal_api::params::InstanceMigrateRequest; +use nexus_types::internal_api::views::BackgroundTask; +use nexus_types::internal_api::views::DemoSaga; +use nexus_types::internal_api::views::MgsUpdateDriverStatus; +use nexus_types::internal_api::views::QuiesceStatus; +use nexus_types::internal_api::views::Saga; +use nexus_types::internal_api::views::UpdateStatus; +use nexus_types::internal_api::views::to_list; +use omicron_common::api::external::Instance; +use omicron_common::api::external::http_pagination::PaginatedById; +use omicron_common::api::external::http_pagination::PaginatedByTimeAndId; +use omicron_common::api::external::http_pagination::ScanById; +use omicron_common::api::external::http_pagination::ScanByTimeAndId; +use omicron_common::api::external::http_pagination::ScanParams; +use omicron_common::api::external::http_pagination::data_page_params_for; +use omicron_uuid_kinds::*; +use range_requests::PotentialRange; + +use crate::app::support_bundles::SupportBundleQueryType; +use crate::context::ApiContext; type NexusApiDescription = ApiDescription; @@ -21,4 +73,956 @@ enum NexusLockstepApiImpl {} impl NexusLockstepApi for NexusLockstepApiImpl { type Context = ApiContext; + + async fn instance_migrate( + rqctx: RequestContext, + path_params: Path, + migrate_params: TypedBody, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let migrate = migrate_params.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let instance = nexus + .instance_migrate(&opctx, path.instance_id, migrate) + .await?; + Ok(HttpResponseOk(instance.into())) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + // Debug interfaces for Sagas + + async fn saga_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = 
&apictx.nexus; + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let saga_stream = nexus.sagas_list(&opctx, &pagparams).await?; + let view_list = to_list(saga_stream).await; + Ok(HttpResponseOk(ScanById::results_page( + &query, + view_list, + &|_, saga: &Saga| saga.id, + )?)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn saga_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let saga = nexus.saga_get(&opctx, path.saga_id).await?; + Ok(HttpResponseOk(saga)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn saga_demo_create( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = &apictx.nexus; + let demo_saga = nexus.saga_demo_create().await?; + Ok(HttpResponseOk(demo_saga)) + }; + + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn saga_demo_complete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + nexus.saga_demo_complete(path.demo_saga_id)?; + Ok(HttpResponseUpdatedNoContent()) + }; + + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + // Debug interfaces for Background Tasks + + async fn bgtask_list( + rqctx: RequestContext, + ) -> Result>, HttpError> + { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = &apictx.nexus; + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let bgtask_list = nexus.bgtasks_list(&opctx).await?; + Ok(HttpResponseOk(bgtask_list)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn bgtask_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let bgtask = nexus.bgtask_status(&opctx, &path.bgtask_name).await?; + Ok(HttpResponseOk(bgtask)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn bgtask_activate( + rqctx: RequestContext, + body: TypedBody, + ) -> Result { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let body = body.into_inner(); + nexus.bgtask_activate(&opctx, body.bgtask_names).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + // Debug interfaces for MGS updates + + async fn mgs_updates( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + 
Ok(HttpResponseOk(nexus.mgs_updates(&opctx).await?)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + // APIs for managing blueprints + async fn blueprint_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = &apictx.nexus; + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let pagparams = data_page_params_for(&rqctx, &query)?; + let blueprints = nexus.blueprint_list(&opctx, &pagparams).await?; + Ok(HttpResponseOk(ScanById::results_page( + &query, + blueprints, + &|_, blueprint: &BlueprintMetadata| { + blueprint.id.into_untyped_uuid() + }, + )?)) + }; + + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Fetches one blueprint + async fn blueprint_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let blueprint = + nexus.blueprint_view(&opctx, path.blueprint_id).await?; + Ok(HttpResponseOk(blueprint)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Deletes one blueprint + async fn blueprint_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + nexus.blueprint_delete(&opctx, path.blueprint_id).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn blueprint_target_view( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let target = nexus.blueprint_target_view(&opctx).await?; + Ok(HttpResponseOk(target)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn blueprint_target_set( + rqctx: RequestContext, + target: TypedBody, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let target = target.into_inner(); + let target = nexus.blueprint_target_set(&opctx, target).await?; + Ok(HttpResponseOk(target)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn blueprint_target_set_enabled( + rqctx: RequestContext, + target: TypedBody, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let target = target.into_inner(); + let target = + nexus.blueprint_target_set_enabled(&opctx, target).await?; + Ok(HttpResponseOk(target)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn blueprint_regenerate( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = 
async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let result = nexus.blueprint_create_regenerate(&opctx).await?; + Ok(HttpResponseOk(result)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn blueprint_import( + rqctx: RequestContext, + blueprint: TypedBody, + ) -> Result { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let blueprint = blueprint.into_inner(); + nexus.blueprint_import(&opctx, blueprint).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn reconfigurator_config_show_current( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let datastore = &apictx.nexus.datastore(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + match datastore.reconfigurator_config_get_latest(&opctx).await? { + Some(switches) => Ok(HttpResponseOk(switches)), + None => Err(HttpError::for_not_found( + None, + "No config in database".into(), + )), + } + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn reconfigurator_config_show( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let datastore = &apictx.nexus.datastore(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let version = path_params.into_inner().version; + match datastore.reconfigurator_config_get(&opctx, version).await? 
{ + Some(switches) => Ok(HttpResponseOk(switches)), + None => Err(HttpError::for_not_found( + None, + format!("No config in database at version {version}"), + )), + } + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn reconfigurator_config_set( + rqctx: RequestContext, + switches: TypedBody, + ) -> Result { + let apictx = &rqctx.context().context; + let handler = async { + let datastore = &apictx.nexus.datastore(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + datastore + .reconfigurator_config_insert_latest_version( + &opctx, + switches.into_inner(), + ) + .await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn update_status( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let result = nexus.update_status(&opctx).await?; + Ok(HttpResponseOk(result)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn sled_list_uninitialized( + rqctx: RequestContext, + ) -> Result>, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = &apictx.nexus; + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let sleds = nexus.sled_list_uninitialized(&opctx).await?; + Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None })) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn sled_add( + rqctx: RequestContext, + sled: TypedBody, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let id = nexus.sled_add(&opctx, sled.into_inner()).await?; + Ok(HttpResponseCreated(SledId { id })) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn sled_expunge( + rqctx: RequestContext, + sled: TypedBody, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let previous_policy = + nexus.sled_expunge(&opctx, sled.into_inner().sled).await?; + Ok(HttpResponseOk(previous_policy)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn physical_disk_expunge( + rqctx: RequestContext, + disk: TypedBody, + ) -> Result { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + nexus.physical_disk_expunge(&opctx, disk.into_inner()).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + let bundles = nexus + 
.support_bundle_list(&opctx, &pagparams) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanByTimeAndId::results_page( + &query, + bundles, + &|_, bundle: &shared::SupportBundleInfo| { + (bundle.time_created, bundle.id.into_untyped_uuid()) + }, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + let bundle = nexus + .support_bundle_view( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle_id), + ) + .await?; + + Ok(HttpResponseOk(bundle.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_index( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + let head = false; + let range = headers + .into_inner() + .range + .map(|r| PotentialRange::new(r.as_bytes())); + + let body = nexus + .support_bundle_download( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle_id), + SupportBundleQueryType::Index, + head, + range, + ) + .await?; + Ok(body) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_download( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + let head = false; + let range = headers + .into_inner() + .range + .map(|r| PotentialRange::new(r.as_bytes())); + + let body = nexus + .support_bundle_download( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle_id), + SupportBundleQueryType::Whole, + head, + range, + ) + .await?; + Ok(body) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_download_file( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let head = false; + let range = headers + .into_inner() + .range + .map(|r| PotentialRange::new(r.as_bytes())); + + let body = nexus + .support_bundle_download( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle.bundle_id), + SupportBundleQueryType::Path { file_path: path.file }, + head, + range, + ) + .await?; + Ok(body) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_head( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + 
crate::context::op_context_for_internal_api(&rqctx).await; + let head = true; + let range = headers + .into_inner() + .range + .map(|r| PotentialRange::new(r.as_bytes())); + + let body = nexus + .support_bundle_download( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle_id), + SupportBundleQueryType::Whole, + head, + range, + ) + .await?; + Ok(body) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_head_file( + rqctx: RequestContext, + headers: Header, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let head = true; + let range = headers + .into_inner() + .range + .map(|r| PotentialRange::new(r.as_bytes())); + + let body = nexus + .support_bundle_download( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle.bundle_id), + SupportBundleQueryType::Path { file_path: path.file }, + head, + range, + ) + .await?; + Ok(body) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_create( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let create_params = body.into_inner(); + + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + let bundle = nexus + .support_bundle_create( + &opctx, + "Created by internal API", + create_params.user_comment, + ) + .await?; + Ok(HttpResponseCreated(bundle.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + nexus + .support_bundle_delete( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle_id), + ) + .await?; + + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn support_bundle_update( + rqctx: RequestContext, + path_params: Path, + body: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let update = body.into_inner(); + + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + + let bundle = nexus + .support_bundle_update_user_comment( + &opctx, + SupportBundleUuid::from_untyped_uuid(path.bundle_id), + update.user_comment, + ) + .await?; + + Ok(HttpResponseOk(bundle.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn clickhouse_policy_get( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = &apictx.nexus; + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + match nexus.datastore().clickhouse_policy_get_latest(&opctx).await? 
+ { + Some(policy) => Ok(HttpResponseOk(policy)), + None => Err(HttpError::for_not_found( + None, + "No clickhouse policy in database".into(), + )), + } + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn clickhouse_policy_set( + rqctx: RequestContext, + policy: TypedBody, + ) -> Result { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + nexus + .datastore() + .clickhouse_policy_insert_latest_version( + &opctx, + &policy.into_inner(), + ) + .await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn oximeter_read_policy_get( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let handler = async { + let nexus = &apictx.nexus; + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + let policy = nexus + .datastore() + .oximeter_read_policy_get_latest(&opctx) + .await?; + Ok(HttpResponseOk(policy)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn oximeter_read_policy_set( + rqctx: RequestContext, + policy: TypedBody, + ) -> Result { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + nexus + .datastore() + .oximeter_read_policy_insert_latest_version( + &opctx, + &policy.into_inner(), + ) + .await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn quiesce_start( + rqctx: RequestContext, + ) -> Result { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + nexus.quiesce_start(&opctx).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn quiesce_get( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = &rqctx.context().context; + let nexus = &apictx.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_internal_api(&rqctx).await; + Ok(HttpResponseOk(nexus.quiesce_state(&opctx).await?)) + }; + apictx + .internal_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } } diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index 81ab9be3f9f..0423823fc6c 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -21,6 +21,7 @@ futures.workspace = true gateway-messages.workspace = true gateway-test-utils.workspace = true headers.workspace = true +hickory-resolver.workspace = true http.workspace = true http-body-util.workspace = true hyper.workspace = true @@ -28,9 +29,9 @@ id-map.workspace = true illumos-utils.workspace = true internal-dns-resolver.workspace = true internal-dns-types.workspace = true -nexus-client.workspace = true nexus-config.workspace = true nexus-db-queries = { workspace = true, features = [ "testing" ] } +nexus-lockstep-client.workspace = true nexus-sled-agent-shared.workspace = true nexus-test-interface.workspace = true nexus-types.workspace = true @@ -40,6 +41,7 @@ omicron-passwords.workspace = true omicron-sled-agent.workspace = true omicron-test-utils.workspace = 
true omicron-uuid-kinds.workspace = true +omicron-workspace-hack.workspace = true oximeter.workspace = true oximeter-collector.workspace = true oximeter-producer.workspace = true @@ -54,9 +56,7 @@ slog-error-chain.workspace = true tokio.workspace = true tokio-postgres = { workspace = true, features = ["with-serde_json-1"] } tokio-util.workspace = true -hickory-resolver.workspace = true uuid.workspace = true -omicron-workspace-hack.workspace = true [features] omicron-dev = ["omicron-test-utils/seed-gen"] diff --git a/nexus/test-utils/src/background.rs b/nexus/test-utils/src/background.rs index 725452c6e3c..7b39f69daa8 100644 --- a/nexus/test-utils/src/background.rs +++ b/nexus/test-utils/src/background.rs @@ -6,9 +6,9 @@ use crate::http_testing::NexusRequest; use dropshot::test_util::ClientTestContext; -use nexus_client::types::BackgroundTask; -use nexus_client::types::CurrentStatus; -use nexus_client::types::LastResult; +use nexus_lockstep_client::types::BackgroundTask; +use nexus_lockstep_client::types::CurrentStatus; +use nexus_lockstep_client::types::LastResult; use nexus_types::internal_api::background::*; use omicron_test_utils::dev::poll::{CondCheckError, wait_for_condition}; use slog::info; @@ -18,14 +18,14 @@ use std::time::Duration; /// running, then return the last polled `BackgroundTask` object. Panics if the /// task has never been activated. pub async fn wait_background_task( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, task_name: &str, ) -> BackgroundTask { // Wait for the task to finish let last_task_poll = wait_for_condition( || async { let task = NexusRequest::object_get( - internal_client, + lockstep_client, &format!("/bgtasks/view/{task_name}"), ) .execute_and_parse_unwrap::() @@ -55,7 +55,7 @@ pub async fn wait_background_task( /// Given the name of a background task, activate it, then wait for it to /// complete. Return the `BackgroundTask` object from this invocation. 
pub async fn activate_background_task( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, task_name: &str, ) -> BackgroundTask { // If it is running, wait for an existing task to complete - this function @@ -65,7 +65,7 @@ pub async fn activate_background_task( let previous_task = wait_for_condition( || async { let task = NexusRequest::object_get( - internal_client, + lockstep_client, &format!("/bgtasks/view/{task_name}"), ) .execute_and_parse_unwrap::() @@ -76,7 +76,7 @@ pub async fn activate_background_task( } info!( - internal_client.client_log, + lockstep_client.client_log, "waiting for {task_name} to go idle", ); @@ -88,7 +88,7 @@ pub async fn activate_background_task( .await .expect("task never went to idle"); - internal_client + lockstep_client .make_request( http::Method::POST, "/bgtasks/activate", @@ -110,7 +110,7 @@ pub async fn activate_background_task( let last_task_poll = wait_for_condition( || async { let task = NexusRequest::object_get( - internal_client, + lockstep_client, &format!("/bgtasks/view/{task_name}"), ) .execute_and_parse_unwrap::() @@ -174,10 +174,10 @@ pub async fn activate_background_task( /// Run the region_replacement background task, returning how many actions /// were taken pub async fn run_region_replacement( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { let last_background_task = - activate_background_task(&internal_client, "region_replacement").await; + activate_background_task(&lockstep_client, "region_replacement").await; let LastResult::Completed(last_result_completed) = last_background_task.last @@ -203,10 +203,10 @@ pub async fn run_region_replacement( /// Run the region_replacement_driver background task, returning how many actions /// were taken pub async fn run_region_replacement_driver( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { let last_background_task = - activate_background_task(&internal_client, "region_replacement_driver") + activate_background_task(&lockstep_client, "region_replacement_driver") .await; let LastResult::Completed(last_result_completed) = @@ -231,10 +231,10 @@ pub async fn run_region_replacement_driver( /// Run the region_snapshot_replacement_start background task, returning how many /// actions were taken pub async fn run_region_snapshot_replacement_start( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { let last_background_task = activate_background_task( - &internal_client, + &lockstep_client, "region_snapshot_replacement_start", ) .await; @@ -263,10 +263,10 @@ pub async fn run_region_snapshot_replacement_start( /// Run the region_snapshot_replacement_garbage_collection background task, /// returning how many actions were taken pub async fn run_region_snapshot_replacement_garbage_collection( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { let last_background_task = activate_background_task( - &internal_client, + &lockstep_client, "region_snapshot_replacement_garbage_collection", ) .await; @@ -294,10 +294,10 @@ pub async fn run_region_snapshot_replacement_garbage_collection( /// Run the region_snapshot_replacement_step background task, returning how many /// actions were taken pub async fn run_region_snapshot_replacement_step( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { let last_background_task = activate_background_task( - &internal_client, + &lockstep_client, 
"region_snapshot_replacement_step", ) .await; @@ -327,10 +327,10 @@ pub async fn run_region_snapshot_replacement_step( /// Run the region_snapshot_replacement_finish background task, returning how many /// actions were taken pub async fn run_region_snapshot_replacement_finish( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { let last_background_task = activate_background_task( - &internal_client, + &lockstep_client, "region_snapshot_replacement_finish", ) .await; @@ -359,10 +359,10 @@ pub async fn run_region_snapshot_replacement_finish( /// Run the read_only_region_replacement_start background task, returning how /// many actions were taken pub async fn run_read_only_region_replacement_start( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { let last_background_task = activate_background_task( - &internal_client, + &lockstep_client, "read_only_region_replacement_start", ) .await; @@ -391,24 +391,24 @@ pub async fn run_read_only_region_replacement_start( /// Run all replacement related background tasks and return how many actions /// were taken. pub async fn run_all_crucible_replacement_tasks( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> usize { // region replacement related - run_region_replacement(internal_client).await + - run_region_replacement_driver(internal_client).await + + run_region_replacement(lockstep_client).await + + run_region_replacement_driver(lockstep_client).await + // region snapshot replacement related - run_region_snapshot_replacement_start(internal_client).await + - run_region_snapshot_replacement_garbage_collection(internal_client).await + - run_region_snapshot_replacement_step(internal_client).await + - run_region_snapshot_replacement_finish(internal_client).await + - run_read_only_region_replacement_start(internal_client).await + run_region_snapshot_replacement_start(lockstep_client).await + + run_region_snapshot_replacement_garbage_collection(lockstep_client).await + + run_region_snapshot_replacement_step(lockstep_client).await + + run_region_snapshot_replacement_finish(lockstep_client).await + + run_read_only_region_replacement_start(lockstep_client).await } pub async fn wait_tuf_artifact_replication_step( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> TufArtifactReplicationStatus { let last_background_task = - wait_background_task(&internal_client, "tuf_artifact_replication") + wait_background_task(&lockstep_client, "tuf_artifact_replication") .await; let LastResult::Completed(last_result_completed) = @@ -429,10 +429,10 @@ pub async fn wait_tuf_artifact_replication_step( } pub async fn run_tuf_artifact_replication_step( - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) -> TufArtifactReplicationStatus { let last_background_task = - activate_background_task(&internal_client, "tuf_artifact_replication") + activate_background_task(&lockstep_client, "tuf_artifact_replication") .await; let LastResult::Completed(last_result_completed) = diff --git a/nexus/tests/integration_tests/crucible_replacements.rs b/nexus/tests/integration_tests/crucible_replacements.rs index efc87e503ec..fbca4c579c8 100644 --- a/nexus/tests/integration_tests/crucible_replacements.rs +++ b/nexus/tests/integration_tests/crucible_replacements.rs @@ -8,7 +8,6 @@ use async_bb8_diesel::AsyncRunQueryDsl; use diesel::ExpressionMethods; use diesel::QueryDsl; use dropshot::test_util::ClientTestContext; -use 
nexus_client::types::LastResult; use nexus_db_lookup::LookupPath; use nexus_db_model::PhysicalDiskPolicy; use nexus_db_model::ReadOnlyTargetReplacement; @@ -17,6 +16,7 @@ use nexus_db_model::RegionSnapshotReplacementState; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_db_queries::db::datastore::region_snapshot_replacement::*; +use nexus_lockstep_client::types::LastResult; use nexus_test_utils::background::*; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; @@ -95,13 +95,13 @@ where pub(crate) async fn wait_for_all_replacements( datastore: &Arc, - internal_client: &ClientTestContext, + lockstep_client: &ClientTestContext, ) { wait_for_condition( || { let datastore = datastore.clone(); let opctx = OpContext::for_tests( - internal_client.client_log.new(o!()), + lockstep_client.client_log.new(o!()), datastore.clone(), ); @@ -120,7 +120,7 @@ pub(crate) async fn wait_for_all_replacements( // can tell you that something is _currently_ moving but not // that all work is done. - run_all_crucible_replacement_tasks(internal_client).await; + run_all_crucible_replacement_tasks(lockstep_client).await; let ro_left_to_do = datastore .find_read_only_regions_on_expunged_physical_disks(&opctx) @@ -142,7 +142,7 @@ pub(crate) async fn wait_for_all_replacements( if ro_left_to_do + rw_left_to_do + rs_left_to_do > 0 { info!( - &internal_client.client_log, + &lockstep_client.client_log, "wait_for_all_replacements: ro {} rw {} rs {}", ro_left_to_do, rw_left_to_do, @@ -189,7 +189,7 @@ pub(crate) async fn wait_for_all_replacements( > 0 { info!( - &internal_client.client_log, + &lockstep_client.client_log, "wait_for_all_replacements: rr {} rsr {}", region_replacement_left, region_snapshot_replacement_left, @@ -271,10 +271,10 @@ async fn test_region_replacement_does_not_create_freed_region( // Now, run the first part of region replacement: this will move the deleted // region into a temporary volume. - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let _ = - activate_background_task(&internal_client, "region_replacement").await; + activate_background_task(&lockstep_client, "region_replacement").await; // Assert there are no freed crucible regions that result from that assert!(datastore.find_deleted_volume_regions().await.unwrap().is_empty()); @@ -297,7 +297,7 @@ mod region_replacement { datastore: Arc, disk_test: DiskTest<'a>, client: ClientTestContext, - internal_client: ClientTestContext, + lockstep_client: ClientTestContext, replacement_request_id: Uuid, } @@ -315,7 +315,7 @@ mod region_replacement { .await; let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let datastore = nexus.datastore().clone(); let opctx = OpContext::for_tests( @@ -370,7 +370,7 @@ mod region_replacement { datastore, disk_test, client: client.clone(), - internal_client: internal_client.clone(), + lockstep_client: lockstep_client.clone(), replacement_request_id, } } @@ -397,7 +397,7 @@ mod region_replacement { pub async fn finish_test(&self) { // Make sure that all the background tasks can run to completion. 
- wait_for_all_replacements(&self.datastore, &self.internal_client) + wait_for_all_replacements(&self.datastore, &self.lockstep_client) .await; // Assert the request is in state Complete @@ -510,7 +510,7 @@ mod region_replacement { pub async fn transition_request_to_running(&self) { // Activate the "region replacement" background task - run_region_replacement(&self.internal_client).await; + run_region_replacement(&self.lockstep_client).await; // The activation above could only have started the associated saga, // so wait until the request is in state Running. @@ -529,7 +529,7 @@ mod region_replacement { // Run the "region replacement driver" task to attach the associated // volume to the simulated pantry. - run_region_replacement_driver(&self.internal_client).await; + run_region_replacement_driver(&self.lockstep_client).await; // The activation above could only have started the associated saga, // so wait until the request is in the expected end state. @@ -619,7 +619,7 @@ mod region_replacement { pub async fn transition_request_to_replacement_done(&self) { // Run the "region replacement driver" task - run_region_replacement_driver(&self.internal_client).await; + run_region_replacement_driver(&self.lockstep_client).await; // The activation above could only have started the associated saga, // so wait until the request is in the expected end state. @@ -865,10 +865,10 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // 1) region replacement will allocate a new region and swap it into the // disk volume. - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let _ = - activate_background_task(&internal_client, "region_replacement").await; + activate_background_task(&lockstep_client, "region_replacement").await; // After that task invocation, there should be one running region // replacement for the disk's region. Filter out the replacement request for @@ -920,7 +920,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // the snapshot volume let _ = activate_background_task( - &internal_client, + &lockstep_client, "region_snapshot_replacement_start", ) .await; @@ -1004,7 +1004,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // reference count to zero. let _ = activate_background_task( - &internal_client, + &lockstep_client, "region_snapshot_replacement_garbage_collection", ) .await; @@ -1067,7 +1067,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // ReplacementDone let last_background_task = - activate_background_task(&internal_client, "region_replacement_driver") + activate_background_task(&lockstep_client, "region_replacement_driver") .await; let res = match last_background_task.last { @@ -1157,7 +1157,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( let mut count = 0; loop { let actions_taken = - run_region_snapshot_replacement_step(&internal_client).await; + run_region_snapshot_replacement_step(&lockstep_client).await; if actions_taken == 0 { break; @@ -1171,7 +1171,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( } let _ = activate_background_task( - &internal_client, + &lockstep_client, "region_snapshot_replacement_finish", ) .await; @@ -1235,7 +1235,7 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // Make sure that all the background tasks can run to completion. 
- wait_for_all_replacements(datastore, &internal_client).await; + wait_for_all_replacements(datastore, &lockstep_client).await; // The disk volume should be deleted by the snapshot delete: wait until this // happens @@ -1290,6 +1290,7 @@ mod region_snapshot_replacement { disk_test: DiskTest<'a>, client: ClientTestContext, internal_client: ClientTestContext, + lockstep_client: ClientTestContext, replacement_request_id: Uuid, snapshot_socket_addr: SocketAddr, } @@ -1309,6 +1310,7 @@ mod region_snapshot_replacement { let client = &cptestctx.external_client; let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let datastore = nexus.datastore().clone(); let opctx = OpContext::for_tests( @@ -1426,6 +1428,7 @@ mod region_snapshot_replacement { disk_test, client: client.clone(), internal_client: internal_client.clone(), + lockstep_client: lockstep_client.clone(), replacement_request_id, snapshot_socket_addr, } @@ -1473,7 +1476,7 @@ mod region_snapshot_replacement { pub async fn finish_test(&self) { // Make sure that all the background tasks can run to completion. - wait_for_all_replacements(&self.datastore, &self.internal_client) + wait_for_all_replacements(&self.datastore, &self.lockstep_client) .await; // Assert the request is in state Complete @@ -1594,7 +1597,7 @@ mod region_snapshot_replacement { pub async fn transition_request_to_replacement_done(&self) { // Activate the "region snapshot replacement start" background task - run_region_snapshot_replacement_start(&self.internal_client).await; + run_region_snapshot_replacement_start(&self.lockstep_client).await; // The activation above could only have started the associated saga, // so wait until the request is in state Running. @@ -1618,7 +1621,7 @@ mod region_snapshot_replacement { // background task run_region_snapshot_replacement_garbage_collection( - &self.internal_client, + &self.lockstep_client, ) .await; @@ -2095,8 +2098,8 @@ async fn test_replacement_sanity(cptestctx: &ControlPlaneTestContext) { .set_auto_activate_volumes(); // Now, run all replacement tasks to completion - let internal_client = &cptestctx.internal_client; - wait_for_all_replacements(&datastore, &internal_client).await; + let lockstep_client = &cptestctx.lockstep_client; + wait_for_all_replacements(&datastore, &lockstep_client).await; // Validate all regions are on non-expunged physical disks assert!( @@ -2172,7 +2175,7 @@ async fn test_region_replacement_triple_sanity( .await .unwrap(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let disk_allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -2206,7 +2209,7 @@ async fn test_region_replacement_triple_sanity( .unwrap(); // Now, run all replacement tasks to completion - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; } let disk_allocated_regions = @@ -2298,7 +2301,7 @@ async fn test_region_replacement_triple_sanity_2( .await .unwrap(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let disk_allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -2338,7 +2341,7 @@ async fn test_region_replacement_triple_sanity_2( info!(log, "waiting for all replacements"); // Now, run all replacement tasks to completion - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, 
&lockstep_client).await; // Expunge the last physical disk { @@ -2370,7 +2373,7 @@ async fn test_region_replacement_triple_sanity_2( info!(log, "waiting for all replacements"); // Now, run all replacement tasks to completion - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; let disk_allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -2410,7 +2413,7 @@ async fn test_replacement_sanity_twice(cptestctx: &ControlPlaneTestContext) { let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; // Create one zpool per sled, each with one dataset. This is required for // region and region snapshot replacement to have somewhere to move the @@ -2478,7 +2481,7 @@ async fn test_replacement_sanity_twice(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; } // Now, do it again, except this time specifying the read-only regions @@ -2503,7 +2506,7 @@ async fn test_replacement_sanity_twice(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; } } @@ -2517,7 +2520,7 @@ async fn test_read_only_replacement_sanity( let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; // Create one zpool per sled, each with one dataset. This is required for // region and region snapshot replacement to have somewhere to move the @@ -2585,7 +2588,7 @@ async fn test_read_only_replacement_sanity( .await .unwrap(); - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; } // Now expunge a sled with read-only regions on it. @@ -2619,7 +2622,7 @@ async fn test_read_only_replacement_sanity( .await .unwrap(); - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; // Validate all regions are on non-expunged physical disks assert!( @@ -2648,7 +2651,7 @@ async fn test_replacement_sanity_twice_after_snapshot_delete( let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; // Create one zpool per sled, each with one dataset. 
This is required for // region and region snapshot replacement to have somewhere to move the @@ -2753,7 +2756,7 @@ async fn test_replacement_sanity_twice_after_snapshot_delete( .await .unwrap(); - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; } // Now, do it again, except this time specifying the read-only regions @@ -2778,6 +2781,6 @@ async fn test_replacement_sanity_twice_after_snapshot_delete( .await .unwrap(); - wait_for_all_replacements(&datastore, &internal_client).await; + wait_for_all_replacements(&datastore, &lockstep_client).await; } } diff --git a/nexus/tests/integration_tests/demo_saga.rs b/nexus/tests/integration_tests/demo_saga.rs index b09a2917442..e5a973528ea 100644 --- a/nexus/tests/integration_tests/demo_saga.rs +++ b/nexus/tests/integration_tests/demo_saga.rs @@ -5,8 +5,8 @@ //! Smoke test for the demo saga use futures::TryStreamExt; -use nexus_client::types::Saga; -use nexus_client::types::SagaState; +use nexus_lockstep_client::types::Saga; +use nexus_lockstep_client::types::SagaState; use nexus_test_interface::NexusServer; use nexus_test_utils_macros::nexus_test; use omicron_test_utils::dev::poll::CondCheckError; @@ -21,12 +21,12 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_demo_saga(cptestctx: &ControlPlaneTestContext) { let log = &cptestctx.logctx.log; - let nexus_internal_url = format!( + let nexus_lockstep_url = format!( "http://{}", - cptestctx.server.get_http_server_internal_address().await + cptestctx.server.get_http_server_lockstep_address().await ); let nexus_client = - nexus_client::Client::new(&nexus_internal_url, log.clone()); + nexus_lockstep_client::Client::new(&nexus_lockstep_url, log.clone()); let sagas_before = list_sagas(&nexus_client).await; eprintln!("found sagas (before): {:?}", sagas_before); @@ -69,6 +69,6 @@ async fn test_demo_saga(cptestctx: &ControlPlaneTestContext) { assert!(matches!(found.state, SagaState::Succeeded)); } -async fn list_sagas(client: &nexus_client::Client) -> Vec { +async fn list_sagas(client: &nexus_lockstep_client::Client) -> Vec { client.saga_list_stream(None, None).try_collect::>().await.unwrap() } diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 483b000f957..ae12c3b37f4 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -2348,8 +2348,8 @@ async fn test_disk_expunge(cptestctx: &ControlPlaneTestContext) { assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); // Expunge the sled - let int_client = &cptestctx.internal_client; - int_client + cptestctx + .lockstep_client .make_request( Method::POST, "/sleds/expunge", diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 9b7a7ecc24e..65e5ede0a70 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -744,7 +744,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { } let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -793,7 +793,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { let migrate_url = format!("/instances/{}/migrate", &instance_id.to_string()); let instance = NexusRequest::new( - 
RequestBuilder::new(internal_client, Method::POST, &migrate_url) + RequestBuilder::new(lockstep_client, Method::POST, &migrate_url) .body(Some(&InstanceMigrateRequest { dst_sled_id })) .expect_status(Some(StatusCode::OK)), ) @@ -916,7 +916,7 @@ async fn test_instance_migrate_v2p_and_routes( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -995,7 +995,7 @@ async fn test_instance_migrate_v2p_and_routes( let migrate_url = format!("/instances/{}/migrate", &instance_id.to_string()); let _ = NexusRequest::new( - RequestBuilder::new(internal_client, Method::POST, &migrate_url) + RequestBuilder::new(lockstep_client, Method::POST, &migrate_url) .body(Some(&InstanceMigrateRequest { dst_sled_id })) .expect_status(Some(StatusCode::OK)), ) @@ -1117,7 +1117,7 @@ async fn test_instance_migration_compatible_cpu_platforms( } let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -1182,7 +1182,7 @@ async fn test_instance_migration_compatible_cpu_platforms( let migrate_url = format!("/instances/{}/migrate", &instance_id.to_string()); let instance = NexusRequest::new( - RequestBuilder::new(internal_client, Method::POST, &migrate_url) + RequestBuilder::new(lockstep_client, Method::POST, &migrate_url) .body(Some(&InstanceMigrateRequest { dst_sled_id })) .expect_status(Some(StatusCode::OK)), ) @@ -1307,7 +1307,7 @@ async fn test_instance_migration_incompatible_cpu_platforms( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -1369,7 +1369,7 @@ async fn test_instance_migration_incompatible_cpu_platforms( let migrate_url = format!("/instances/{}/migrate", &instance_id.to_string()); NexusRequest::new( - RequestBuilder::new(internal_client, Method::POST, &migrate_url) + RequestBuilder::new(lockstep_client, Method::POST, &migrate_url) .body(Some(&InstanceMigrateRequest { dst_sled_id: milan_sled_id })) .expect_status(Some(http::StatusCode::INSUFFICIENT_STORAGE)), ) @@ -1384,7 +1384,7 @@ async fn test_instance_migration_unknown_sled_type( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -1457,7 +1457,7 @@ async fn test_instance_migration_unknown_sled_type( let migrate_url = format!("/instances/{}/migrate", &instance_id.to_string()); NexusRequest::new( - RequestBuilder::new(internal_client, Method::POST, &migrate_url) + RequestBuilder::new(lockstep_client, Method::POST, &migrate_url) .body(Some(&InstanceMigrateRequest { dst_sled_id })) .expect_status(Some(expected_status)), ) @@ -1588,7 +1588,7 @@ async fn test_instance_failed_by_instance_watcher_can_be_deleted( .await; nexus_test_utils::background::activate_background_task( - &cptestctx.internal_client, + 
&cptestctx.lockstep_client, "instance_watcher", ) .await; @@ -1617,7 +1617,7 @@ async fn test_instance_failed_by_instance_watcher_can_be_restarted( .await; nexus_test_utils::background::activate_background_task( - &cptestctx.internal_client, + &cptestctx.lockstep_client, "instance_watcher", ) .await; @@ -1727,8 +1727,8 @@ async fn test_instance_failed_when_on_expunged_sled( "expunging sled"; "sled_id" => %default_sled_id, ); - let int_client = &cptestctx.internal_client; - int_client + cptestctx + .lockstep_client .make_request( Method::POST, "/sleds/expunge", @@ -1784,7 +1784,7 @@ async fn test_instance_failed_by_instance_watcher_automatically_reincarnates( dbg!( nexus_test_utils::background::activate_background_task( - &cptestctx.internal_client, + &cptestctx.lockstep_client, "instance_watcher", ) .await @@ -1858,7 +1858,7 @@ async fn test_instance_failed_by_stop_request_does_not_reincarnate( // Activate the reincarnation task. dbg!( nexus_test_utils::background::activate_background_task( - &cptestctx.internal_client, + &cptestctx.lockstep_client, "instance_reincarnation", ) .await @@ -1995,7 +1995,7 @@ async fn test_instances_are_not_marked_failed_on_other_sled_agent_errors_by_inst .await; nexus_test_utils::background::activate_background_task( - &cptestctx.internal_client, + &cptestctx.lockstep_client, "instance_watcher", ) .await; @@ -2228,7 +2228,7 @@ async fn test_instance_metrics_with_migration( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -2312,7 +2312,7 @@ async fn test_instance_metrics_with_migration( let migrate_url = format!("/instances/{}/migrate", &instance_id.to_string()); let _ = NexusRequest::new( - RequestBuilder::new(internal_client, Method::POST, &migrate_url) + RequestBuilder::new(lockstep_client, Method::POST, &migrate_url) .body(Some(&InstanceMigrateRequest { dst_sled_id })) .expect_status(Some(StatusCode::OK)), ) diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 0a9bf48475e..effb5a7bd45 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -258,14 +258,14 @@ async fn test_instance_watcher_metrics( filter timestamp > @2000-01-01"; let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let nexus = &cptestctx.server.server_context().nexus; let oximeter = &cptestctx.oximeter; let activate_instance_watcher = || async { use nexus_test_utils::background::activate_background_task; - let _ = activate_background_task(&internal_client, "instance_watcher") + let _ = activate_background_task(&lockstep_client, "instance_watcher") .await; }; @@ -479,11 +479,11 @@ async fn test_project_timeseries_query( let i2p1 = create_instance(&client, "project1", "instance2").await; let _i3p2 = create_instance(&client, "project2", "instance3").await; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; // get the instance metrics to show up let _ = - activate_background_task(&internal_client, "instance_watcher").await; + activate_background_task(&lockstep_client, "instance_watcher").await; // Query with no project specified let q1 = "get virtual_machine:check"; diff --git 
a/nexus/tests/integration_tests/quiesce.rs b/nexus/tests/integration_tests/quiesce.rs index 3ab3452b62e..2d80bb1e16b 100644 --- a/nexus/tests/integration_tests/quiesce.rs +++ b/nexus/tests/integration_tests/quiesce.rs @@ -4,7 +4,7 @@ use anyhow::{Context, anyhow}; use nexus_auth::context::OpContext; -use nexus_client::types::QuiesceState; +use nexus_lockstep_client::types::QuiesceState; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::planner::PlannerRng; use nexus_reconfigurator_preparation::PlanningInputFromDb; @@ -29,12 +29,12 @@ async fn test_quiesce(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(log.clone(), datastore.clone()); - let nexus_internal_url = format!( + let nexus_lockstep_url = format!( "http://{}", - cptestctx.server.get_http_server_internal_address().await + cptestctx.server.get_http_server_lockstep_address().await ); let nexus_client = - nexus_client::Client::new(&nexus_internal_url, log.clone()); + nexus_lockstep_client::Client::new(&nexus_lockstep_url, log.clone()); // Collect what we need to modify the blueprint. let collection = wait_for_condition( diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index 40d7b41fb48..f2f01b48a47 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -5,11 +5,11 @@ use dropshot::ResultsPage; use http::Method; use http::StatusCode; -use nexus_client::types::SledId; use nexus_db_model::SledBaseboard; use nexus_db_model::SledCpuFamily as DbSledCpuFamily; use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; +use nexus_lockstep_client::types::SledId; use nexus_sled_agent_shared::inventory::SledCpuFamily; use nexus_sled_agent_shared::inventory::SledRole; use nexus_test_utils::TEST_SUITE_PASSWORD; diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 83405813063..80807a78eb3 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -1680,8 +1680,8 @@ async fn test_snapshot_expunge(cptestctx: &ControlPlaneTestContext) { .await; // Expunge the sled - let int_client = &cptestctx.internal_client; - int_client + cptestctx + .lockstep_client .make_request( Method::POST, "/sleds/expunge", diff --git a/nexus/tests/integration_tests/support_bundles.rs b/nexus/tests/integration_tests/support_bundles.rs index 215936dbecb..64ecd43e171 100644 --- a/nexus/tests/integration_tests/support_bundles.rs +++ b/nexus/tests/integration_tests/support_bundles.rs @@ -10,7 +10,7 @@ use dropshot::HttpErrorResponseBody; use dropshot::test_util::ClientTestContext; use http::StatusCode; use http::method::Method; -use nexus_client::types::LastResult; +use nexus_lockstep_client::types::LastResult; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -338,7 +338,7 @@ async fn activate_bundle_collection_background_task( use nexus_test_utils::background::activate_background_task; let task = activate_background_task( - &cptestctx.internal_client, + &cptestctx.lockstep_client, "support_bundle_collector", ) .await; diff --git a/nexus/tests/integration_tests/updates.rs b/nexus/tests/integration_tests/updates.rs index ef47345cfae..0f55a3e7f86 100644 --- a/nexus/tests/integration_tests/updates.rs +++ 
b/nexus/tests/integration_tests/updates.rs @@ -152,7 +152,7 @@ async fn test_repo_upload_unconfigured() -> Result<()> { // The artifact replication background task should have nothing to do. let status = - run_tuf_artifact_replication_step(&cptestctx.internal_client).await; + run_tuf_artifact_replication_step(&cptestctx.lockstep_client).await; assert_eq!( status.last_run_counters.put_ok + status.last_run_counters.copy_ok, 0 @@ -248,7 +248,7 @@ async fn test_repo_upload() -> Result<()> { // The artifact replication background task should have been activated, and // we should see a local repo and successful PUTs. let status = - wait_tuf_artifact_replication_step(&cptestctx.internal_client).await; + wait_tuf_artifact_replication_step(&cptestctx.lockstep_client).await; eprintln!("{status:?}"); assert_eq!(status.generation, 2u32.into()); assert_eq!(status.last_run_counters.put_config_ok, 4); @@ -267,7 +267,7 @@ async fn test_repo_upload() -> Result<()> { // Run the replication background task again; the local repos should be // dropped. let status = - run_tuf_artifact_replication_step(&cptestctx.internal_client).await; + run_tuf_artifact_replication_step(&cptestctx.lockstep_client).await; eprintln!("{status:?}"); assert_eq!(status.last_run_counters.put_config_ok, 4); assert_eq!(status.last_run_counters.list_ok, 4); @@ -516,7 +516,7 @@ async fn test_repo_upload() -> Result<()> { ); // ... and the task will have one artifact to replicate. let status = - wait_tuf_artifact_replication_step(&cptestctx.internal_client).await; + wait_tuf_artifact_replication_step(&cptestctx.lockstep_client).await; eprintln!("{status:?}"); assert_eq!(status.generation, 3u32.into()); assert_eq!(status.last_run_counters.list_ok, 4); diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index 6da752f7e71..5975a51d7f7 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -4062,7 +4062,7 @@ async fn test_read_only_region_reference_counting( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -4119,7 +4119,7 @@ async fn test_read_only_region_reference_counting( .await .unwrap(); - wait_for_all_replacements(datastore, &internal_client).await; + wait_for_all_replacements(datastore, &lockstep_client).await; // The snapshot's allocated regions should have the one read-only region @@ -4330,7 +4330,7 @@ async fn test_read_only_region_reference_counting_layers( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -4387,7 +4387,7 @@ async fn test_read_only_region_reference_counting_layers( .await .unwrap(); - wait_for_all_replacements(datastore, &internal_client).await; + wait_for_all_replacements(datastore, &lockstep_client).await; // Grab the read-only region in the snapshot volume @@ -5581,7 +5581,7 @@ async fn test_double_layer_with_read_only_region_delete( // 6) At the end, assert that all Crucible resources were cleaned up let client = &cptestctx.external_client; - let internal_client = 
&cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -5646,7 +5646,7 @@ async fn test_double_layer_with_read_only_region_delete( .await .unwrap(); - wait_for_all_replacements(datastore, &internal_client).await; + wait_for_all_replacements(datastore, &lockstep_client).await; assert!(!disk_test.crucible_resources_deleted().await); @@ -5706,7 +5706,7 @@ async fn test_double_layer_snapshot_with_read_only_region_delete_2( // 6) At the end, assert that all Crucible resources were cleaned up let client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -5757,7 +5757,7 @@ async fn test_double_layer_snapshot_with_read_only_region_delete_2( .await .unwrap(); - wait_for_all_replacements(datastore, &internal_client).await; + wait_for_all_replacements(datastore, &lockstep_client).await; wait_for_condition( || { @@ -5803,7 +5803,7 @@ async fn test_double_layer_snapshot_with_read_only_region_delete_2( .await .unwrap(); - wait_for_all_replacements(datastore, &internal_client).await; + wait_for_all_replacements(datastore, &lockstep_client).await; assert!(!disk_test.crucible_resources_deleted().await); @@ -5832,7 +5832,7 @@ async fn test_double_layer_snapshot_with_read_only_region_delete_2( .await .unwrap(); - wait_for_all_replacements(datastore, &internal_client).await; + wait_for_all_replacements(datastore, &lockstep_client).await; assert!(!disk_test.crucible_resources_deleted().await); diff --git a/nexus/tests/integration_tests/webhooks.rs b/nexus/tests/integration_tests/webhooks.rs index f7d6568be71..dab34a8d3e3 100644 --- a/nexus/tests/integration_tests/webhooks.rs +++ b/nexus/tests/integration_tests/webhooks.rs @@ -486,7 +486,7 @@ async fn test_cannot_subscribe_to_probes(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_event_delivery(cptestctx: &ControlPlaneTestContext) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let datastore = nexus.datastore(); let opctx = @@ -539,9 +539,9 @@ async fn test_event_delivery(cptestctx: &ControlPlaneTestContext) { .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_async().await; @@ -550,7 +550,7 @@ async fn test_event_delivery(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_multiple_secrets(cptestctx: &ControlPlaneTestContext) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let datastore = nexus.datastore(); let opctx = @@ -661,9 +661,9 @@ async fn test_multiple_secrets(cptestctx: &ControlPlaneTestContext) { .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - 
activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_async().await; @@ -672,7 +672,7 @@ async fn test_multiple_secrets(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_multiple_receivers(cptestctx: &ControlPlaneTestContext) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let client = &cptestctx.external_client; let datastore = nexus.datastore(); @@ -837,9 +837,9 @@ async fn test_multiple_receivers(cptestctx: &ControlPlaneTestContext) { .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); // The `test.foo.bar` receiver should have received 1 event. @@ -855,7 +855,7 @@ async fn test_multiple_receivers(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_retry_backoff(cptestctx: &ControlPlaneTestContext) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let datastore = nexus.datastore(); let opctx = @@ -908,9 +908,9 @@ async fn test_retry_backoff(cptestctx: &ControlPlaneTestContext) { .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(1).await; @@ -941,7 +941,7 @@ async fn test_retry_backoff(cptestctx: &ControlPlaneTestContext) { // Okay, we are now in backoff. Activate the deliverator again --- no new // event should be delivered. dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); // Activating the deliverator whilst in backoff should not send another // request. @@ -978,13 +978,13 @@ async fn test_retry_backoff(cptestctx: &ControlPlaneTestContext) { // Wait out the backoff period for the first request. tokio::time::sleep(std::time::Duration::from_secs(15)).await; dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(1).await; // Again, we should be in backoff, so no request will be sent. 
dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(1).await; mock.delete_async().await; @@ -1048,13 +1048,13 @@ async fn test_retry_backoff(cptestctx: &ControlPlaneTestContext) { // tokio::time::sleep(std::time::Duration::from_secs(15)).await; dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(0).await; tokio::time::sleep(std::time::Duration::from_secs(5)).await; dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_async().await; @@ -1270,7 +1270,7 @@ async fn test_probe_resends_failed_deliveries( cptestctx: &ControlPlaneTestContext, ) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let server = httpmock::MockServer::start_async().await; let datastore = nexus.datastore(); @@ -1329,23 +1329,23 @@ async fn test_probe_resends_failed_deliveries( .expect("event2 should be published successfully") ); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(2).await; // Backoff 1 tokio::time::sleep(std::time::Duration::from_secs(11)).await; dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(4).await; // Backoff 2 tokio::time::sleep(std::time::Duration::from_secs(22)).await; dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(6).await; @@ -1414,7 +1414,7 @@ async fn test_probe_resends_failed_deliveries( // Both events should be resent. 
dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(2).await; } @@ -1424,7 +1424,7 @@ async fn test_api_resends_failed_deliveries( cptestctx: &ControlPlaneTestContext, ) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let client = &cptestctx.external_client; let server = httpmock::MockServer::start_async().await; @@ -1490,18 +1490,18 @@ async fn test_api_resends_failed_deliveries( .expect("event should be published successfully"); dbg!(event2); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); tokio::time::sleep(std::time::Duration::from_secs(11)).await; dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); tokio::time::sleep(std::time::Duration::from_secs(22)).await; dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(3).await; @@ -1543,7 +1543,7 @@ async fn test_api_resends_failed_deliveries( dbg!(error); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(1).await; } @@ -1563,7 +1563,7 @@ async fn subscription_add_test( new_subscription: &str, ) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let datastore = nexus.datastore(); let opctx = @@ -1618,9 +1618,9 @@ async fn subscription_add_test( .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(0).await; @@ -1651,9 +1651,9 @@ async fn subscription_add_test( .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(1).await; @@ -1680,7 +1680,7 @@ async fn subscription_remove_test( deleted_subscription: &str, ) { let nexus = cptestctx.server.server_context().nexus.clone(); - let internal_client = &cptestctx.internal_client; + let lockstep_client = &cptestctx.lockstep_client; let datastore = nexus.datastore(); let opctx = @@ -1750,9 +1750,9 @@ async fn subscription_remove_test( .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - 
activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(1).await; @@ -1782,9 +1782,9 @@ async fn subscription_remove_test( .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); // No new calls should be observed. @@ -1829,20 +1829,10 @@ async fn subscription_remove_test( .expect("event should be published successfully"); dbg!(event); - dbg!(activate_background_task(internal_client, "alert_dispatcher").await); + dbg!(activate_background_task(lockstep_client, "alert_dispatcher").await); dbg!( - activate_background_task(internal_client, "webhook_deliverator").await + activate_background_task(lockstep_client, "webhook_deliverator").await ); mock.assert_calls_async(1).await; - - // Deleting a subscription that doesn't exist should 404. - dbg!( - resource_helpers::object_delete_error( - &internal_client, - &subscription_remove_url(rx_id, &deleted_subscription), - http::StatusCode::NOT_FOUND - ) - .await - ); } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 2764e19fca4..7b1d7ea36a9 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -10,39 +10,10 @@ "version": "0.0.1" }, "paths": { - "/bgtasks": { - "get": { - "summary": "List background tasks", - "description": "This is a list of discrete background activities that Nexus carries out. This is exposed for support and debugging.", - "operationId": "bgtask_list", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "title": "Map_of_BackgroundTask", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BackgroundTask" - } - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, "/bgtasks/activate": { "post": { - "summary": "Activates one or more background tasks, causing them to be run immediately", - "description": "if idle, or scheduled to run again as soon as possible if already running.", + "summary": "**Do not use in new code!**", + "description": "Callers to this API should either be capable of using the nexus-lockstep API or should be rewritten to use a doorbell API to activate a specific task. 
Task names are internal to Nexus.", "operationId": "bgtask_activate", "requestBody": { "content": { @@ -67,90 +38,6 @@ } } }, - "/bgtasks/view/{bgtask_name}": { - "get": { - "summary": "Fetch status of one background task", - "description": "This is exposed for support and debugging.", - "operationId": "bgtask_view", - "parameters": [ - { - "in": "path", - "name": "bgtask_name", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BackgroundTask" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/clickhouse/policy": { - "get": { - "summary": "Get the current clickhouse policy", - "operationId": "clickhouse_policy_get", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClickhousePolicy" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "post": { - "summary": "Set the new clickhouse policy", - "operationId": "clickhouse_policy_set", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClickhousePolicy" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stop-request": { "post": { "summary": "An Upstairs will update this endpoint if a Downstairs client task is", @@ -362,21 +249,25 @@ } } }, - "/demo-saga": { + "/disk/{disk_id}/remove-read-only-parent": { "post": { - "summary": "Kick off an instance of the \"demo\" saga", - "description": "This saga is used for demo and testing. The saga just waits until you complete using the `saga_demo_complete` API.", - "operationId": "saga_demo_create", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DemoSaga" - } - } + "summary": "Request removal of a read_only_parent from a disk.", + "description": "This is a thin wrapper around the volume_remove_read_only_parent saga. All we are doing here is, given a disk UUID, figure out what the volume_id is for that disk, then use that to call the disk_remove_read_only_parent saga on it.", + "operationId": "cpapi_disk_remove_read_only_parent", + "parameters": [ + { + "in": "path", + "name": "disk_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" } + } + ], + "responses": { + "204": { + "description": "resource updated" }, "4XX": { "$ref": "#/components/responses/Error" @@ -387,21 +278,58 @@ } } }, - "/demo-saga/{demo_saga_id}/complete": { - "post": { - "summary": "Complete a waiting demo saga", - "description": "Note that the id used here is not the same as the id of the saga. 
It's the one returned by the `saga_demo_create` API.", - "operationId": "saga_demo_complete", + "/disks/{disk_id}": { + "put": { + "summary": "Report updated state for a disk.", + "operationId": "cpapi_disks_put", "parameters": [ { "in": "path", - "name": "demo_saga_id", + "name": "disk_id", "required": true, "schema": { - "$ref": "#/components/schemas/TypedUuidForDemoSagaKind" + "type": "string", + "format": "uuid" } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiskRuntimeState" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/metrics/collectors": { + "post": { + "summary": "Accept a notification of a new oximeter collection server.", + "operationId": "cpapi_collectors_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OximeterInfo" + } + } + }, + "required": true + }, "responses": { "204": { "description": "resource updated" @@ -415,11 +343,21 @@ } } }, - "/deployment/blueprints/all": { + "/metrics/collectors/{collector_id}/producers": { "get": { - "summary": "Lists blueprints", - "operationId": "blueprint_list", + "summary": "List all metric producers assigned to an oximeter collector.", + "operationId": "cpapi_assigned_producers_list", "parameters": [ + { + "in": "path", + "name": "collector_id", + "description": "The ID of the oximeter collector.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, { "in": "query", "name": "limit", @@ -454,7 +392,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/BlueprintMetadataResultsPage" + "$ref": "#/components/schemas/ProducerEndpointResultsPage" } } } @@ -471,29 +409,27 @@ } } }, - "/deployment/blueprints/all/{blueprint_id}": { - "get": { - "summary": "Fetches one blueprint", - "operationId": "blueprint_view", - "parameters": [ - { - "in": "path", - "name": "blueprint_id", - "description": "ID of the blueprint", - "required": true, - "schema": { - "type": "string", - "format": "uuid" + "/metrics/producers": { + "post": { + "summary": "Accept a registration from a new metric producer", + "operationId": "cpapi_producers_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProducerEndpoint" + } } - } - ], + }, + "required": true + }, "responses": { - "200": { - "description": "successful operation", + "201": { + "description": "successful creation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Blueprint" + "$ref": "#/components/schemas/ProducerRegistrationResponse" } } } @@ -505,25 +441,49 @@ "$ref": "#/components/responses/Error" } } - }, - "delete": { - "summary": "Deletes one blueprint", - "operationId": "blueprint_delete", + } + }, + "/nat/ipv4/changeset/{from_gen}": { + "get": { + "summary": "Fetch NAT ChangeSet", + "description": "Caller provides their generation as `from_gen`, along with a query parameter for the page size (`limit`). Endpoint will return changes that have occured since the caller's generation number up to the latest change or until the `limit` is reached. 
If there are no changes, an empty vec is returned.", + "operationId": "ipv4_nat_changeset", "parameters": [ { "in": "path", - "name": "blueprint_id", - "description": "ID of the blueprint", + "name": "from_gen", + "description": "which change number to start generating the change set from", "required": true, "schema": { - "type": "string", - "format": "uuid" + "type": "integer", + "format": "int64" + } + }, + { + "in": "query", + "name": "limit", + "required": true, + "schema": { + "type": "integer", + "format": "uint32", + "minimum": 0 } } ], "responses": { - "204": { - "description": "successful deletion" + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_NatEntryView", + "type": "array", + "items": { + "$ref": "#/components/schemas/NatEntryView" + } + } + } + } }, "4XX": { "$ref": "#/components/responses/Error" @@ -534,102 +494,59 @@ } } }, - "/deployment/blueprints/import": { - "post": { - "summary": "Imports a client-provided blueprint", - "description": "This is intended for development and support, not end users or operators.", - "operationId": "blueprint_import", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Blueprint" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/deployment/blueprints/regenerate": { - "post": { - "summary": "Generates a new blueprint for the current system, re-evaluating anything", - "description": "that's changed since the last one was generated", - "operationId": "blueprint_regenerate", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Blueprint" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/deployment/blueprints/target": { + "/probes/{sled}": { "get": { - "summary": "Fetches the current target blueprint, if any", - "operationId": "blueprint_target_view", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BlueprintTarget" - } - } + "summary": "Get all the probes associated with a given sled.", + "operationId": "probes_get", + "parameters": [ + { + "in": "path", + "name": "sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" } }, - "4XX": { - "$ref": "#/components/responses/Error" + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "post": { - "summary": "Make the specified blueprint the new target", - "operationId": "blueprint_target_set", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BlueprintTargetSet" - } + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" } }, - "required": true - }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], "responses": { 
"200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/BlueprintTarget" + "title": "Array_of_ProbeInfo", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeInfo" + } } } } @@ -640,33 +557,41 @@ "5XX": { "$ref": "#/components/responses/Error" } + }, + "x-dropshot-pagination": { + "required": [] } } }, - "/deployment/blueprints/target/enabled": { + "/racks/{rack_id}/initialization-complete": { "put": { - "summary": "Set the `enabled` field of the current target blueprint", - "operationId": "blueprint_target_set_enabled", + "summary": "Report that the Rack Setup Service initialization is complete", + "description": "See RFD 278 for more details.", + "operationId": "rack_initialization_complete", + "parameters": [ + { + "in": "path", + "name": "rack_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/BlueprintTargetSet" + "$ref": "#/components/schemas/RackInitializationRequest" } } }, "required": true }, "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/BlueprintTarget" - } - } - } + "204": { + "description": "resource updated" }, "4XX": { "$ref": "#/components/responses/Error" @@ -677,17 +602,27 @@ } } }, - "/deployment/reconfigurator-config": { + "/sled-agents/{sled_id}": { "get": { - "summary": "Get the current reconfigurator configuration", - "operationId": "reconfigurator_config_show_current", + "summary": "Return information about the given sled agent", + "operationId": "sled_agent_get", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + } + ], "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ReconfiguratorConfigView" + "$ref": "#/components/schemas/SledAgentInfo" } } } @@ -701,13 +636,23 @@ } }, "post": { - "summary": "Update the reconfigurator config at the latest versions", - "operationId": "reconfigurator_config_set", + "summary": "Report that the sled agent for the specified sled has come online.", + "operationId": "sled_agent_put", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + } + ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ReconfiguratorConfigParam" + "$ref": "#/components/schemas/SledAgentInfo" } } }, @@ -726,29 +671,65 @@ } } }, - "/deployment/reconfigurator-config/{version}": { - "get": { - "summary": "Get the reconfigurator config at `version` if it exists", - "operationId": "reconfigurator_config_show", + "/sled-agents/{sled_id}/firewall-rules-update": { + "post": { + "summary": "Request a new set of firewall rules for a sled.", + "description": "This causes Nexus to read the latest set of rules for the sled, and call a Sled endpoint which applies the rules to all OPTE ports that happen to exist.", + "operationId": "sled_firewall_rules_request", "parameters": [ { "in": "path", - "name": "version", + "name": "sled_id", "required": true, "schema": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + } + ], + "responses": { + "204": { + 
"description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/switch/{switch_id}": { + "put": { + "operationId": "switch_put", + "parameters": [ + { + "in": "path", + "name": "switch_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchPutRequest" + } + } + }, + "required": true + }, "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ReconfiguratorConfigView" + "$ref": "#/components/schemas/SwitchPutResponse" } } } @@ -762,17 +743,18 @@ } } }, - "/deployment/update-status": { + "/v1/ping": { "get": { - "summary": "Show deployed versions of artifacts", - "operationId": "update_status", + "summary": "Ping API", + "description": "Always responds with Ok if it responds at all.", + "operationId": "ping", "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/UpdateStatus" + "$ref": "#/components/schemas/Ping" } } } @@ -786,22 +768,30 @@ } } }, - "/disk/{disk_id}/remove-read-only-parent": { - "post": { - "summary": "Request removal of a read_only_parent from a disk.", - "description": "This is a thin wrapper around the volume_remove_read_only_parent saga. All we are doing here is, given a disk UUID, figure out what the volume_id is for that disk, then use that to call the disk_remove_read_only_parent saga on it.", - "operationId": "cpapi_disk_remove_read_only_parent", + "/vmms/{propolis_id}": { + "put": { + "summary": "Report updated state for a VMM.", + "operationId": "cpapi_instances_put", "parameters": [ { "in": "path", - "name": "disk_id", + "name": "propolis_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForPropolisKind" } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledVmmState" + } + } + }, + "required": true + }, "responses": { "204": { "description": "resource updated" @@ -815,31 +805,21 @@ } } }, - "/disks/{disk_id}": { - "put": { - "summary": "Report updated state for a disk.", - "operationId": "cpapi_disks_put", + "/volume/{volume_id}/remove-read-only-parent": { + "post": { + "summary": "Request removal of a read_only_parent from a volume.", + "description": "A volume can be created with the source data for that volume being another volume that attached as a \"read_only_parent\". In the background there exists a scrubber that will copy the data from the read_only_parent into the volume. 
When that scrubber has completed copying the data, this endpoint can be called to update the database that the read_only_parent is no longer needed for a volume and future attachments of this volume should not include that read_only_parent.", + "operationId": "cpapi_volume_remove_read_only_parent", "parameters": [ { "in": "path", - "name": "disk_id", + "name": "volume_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForVolumeKind" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DiskRuntimeState" - } - } - }, - "required": true - }, "responses": { "204": { "description": "resource updated" @@ -852,6431 +832,2996 @@ } } } - }, - "/experimental/v1/system/support-bundles": { - "get": { - "summary": "List all support bundles", - "operationId": "support_bundle_list", - "parameters": [ - { - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - }, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, - { - "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/TimeAndIdSortMode" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupportBundleInfoResultsPage" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - }, - "x-dropshot-pagination": { - "required": [] - } - }, - "post": { - "summary": "Create a new support bundle", - "operationId": "support_bundle_create", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupportBundleCreate" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "successful creation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupportBundleInfo" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/experimental/v1/system/support-bundles/{bundle_id}": { - "get": { - "summary": "View a support bundle", - "operationId": "support_bundle_view", - "parameters": [ - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupportBundleInfo" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "put": { - "summary": "Update a support bundle", - "operationId": "support_bundle_update", - "parameters": [ - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupportBundleUpdate" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "successful 
operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupportBundleInfo" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "delete": { - "summary": "Delete an existing support bundle", - "description": "May also be used to cancel a support bundle which is currently being collected, or to remove metadata for a support bundle that has failed.", - "operationId": "support_bundle_delete", - "parameters": [ - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "responses": { - "204": { - "description": "successful deletion" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/experimental/v1/system/support-bundles/{bundle_id}/download": { - "get": { - "summary": "Download the contents of a support bundle", - "operationId": "support_bundle_download", - "parameters": [ - { - "in": "header", - "name": "range", - "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", - "schema": { - "type": "string" - } - }, - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "responses": { - "default": { - "description": "", - "content": { - "*/*": { - "schema": {} - } - } - } - } - }, - "head": { - "summary": "Download the metadata of a support bundle", - "operationId": "support_bundle_head", - "parameters": [ - { - "in": "header", - "name": "range", - "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", - "schema": { - "type": "string" - } - }, - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "responses": { - "default": { - "description": "", - "content": { - "*/*": { - "schema": {} - } - } - } - } - } - }, - "/experimental/v1/system/support-bundles/{bundle_id}/download/{file}": { - "get": { - "summary": "Download a file within a support bundle", - "operationId": "support_bundle_download_file", - "parameters": [ - { - "in": "header", - "name": "range", - "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", - "schema": { - "type": "string" - } - }, - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, - { - "in": "path", - "name": "file", - "description": "The file within the bundle to download", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "default": { - "description": "", - "content": { - "*/*": { - "schema": {} - } - } - } - } - }, - "head": { - "summary": "Download the metadata of a file within the support bundle", - "operationId": "support_bundle_head_file", - "parameters": [ - { - "in": "header", - "name": "range", - "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", - "schema": { - "type": "string" - } - }, - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, - { - "in": "path", - "name": 
"file", - "description": "The file within the bundle to download", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "default": { - "description": "", - "content": { - "*/*": { - "schema": {} - } - } - } - } - } - }, - "/experimental/v1/system/support-bundles/{bundle_id}/index": { - "get": { - "summary": "Download the index of a support bundle", - "operationId": "support_bundle_index", - "parameters": [ - { - "in": "header", - "name": "range", - "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", - "schema": { - "type": "string" - } - }, - { - "in": "path", - "name": "bundle_id", - "description": "ID of the support bundle", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "responses": { - "default": { - "description": "", - "content": { - "*/*": { - "schema": {} - } - } - } - } - } - }, - "/instances/{instance_id}/migrate": { - "post": { - "operationId": "instance_migrate", - "parameters": [ - { - "in": "path", - "name": "instance_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/InstanceMigrateRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Instance" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/metrics/collectors": { - "post": { - "summary": "Accept a notification of a new oximeter collection server.", - "operationId": "cpapi_collectors_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OximeterInfo" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/metrics/collectors/{collector_id}/producers": { - "get": { - "summary": "List all metric producers assigned to an oximeter collector.", - "operationId": "cpapi_assigned_producers_list", - "parameters": [ - { - "in": "path", - "name": "collector_id", - "description": "The ID of the oximeter collector.", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, - { - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - }, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, - { - "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/IdSortMode" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ProducerEndpointResultsPage" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - }, - "x-dropshot-pagination": { - "required": [] - } - } - }, - "/metrics/producers": { - "post": { - "summary": "Accept a registration from a new metric producer", - "operationId": 
"cpapi_producers_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ProducerEndpoint" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "successful creation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ProducerRegistrationResponse" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/mgs-updates": { - "get": { - "summary": "Fetch information about ongoing MGS updates", - "operationId": "mgs_updates", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MgsUpdateDriverStatus" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/nat/ipv4/changeset/{from_gen}": { - "get": { - "summary": "Fetch NAT ChangeSet", - "description": "Caller provides their generation as `from_gen`, along with a query parameter for the page size (`limit`). Endpoint will return changes that have occured since the caller's generation number up to the latest change or until the `limit` is reached. If there are no changes, an empty vec is returned.", - "operationId": "ipv4_nat_changeset", - "parameters": [ - { - "in": "path", - "name": "from_gen", - "description": "which change number to start generating the change set from", - "required": true, - "schema": { - "type": "integer", - "format": "int64" - } - }, - { - "in": "query", - "name": "limit", - "required": true, - "schema": { - "type": "integer", - "format": "uint32", - "minimum": 0 - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "title": "Array_of_NatEntryView", - "type": "array", - "items": { - "$ref": "#/components/schemas/NatEntryView" - } - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/oximeter/read-policy": { - "get": { - "summary": "Get the current oximeter read policy", - "operationId": "oximeter_read_policy_get", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OximeterReadPolicy" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "post": { - "summary": "Set the new oximeter read policy", - "operationId": "oximeter_read_policy_set", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OximeterReadPolicy" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/physical-disk/expunge": { - "post": { - "summary": "Mark a physical disk as expunged", - "description": "This is an irreversible process! 
It should only be called after sufficient warning to the operator.\n\nThis is idempotent.", - "operationId": "physical_disk_expunge", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PhysicalDiskPath" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/probes/{sled}": { - "get": { - "summary": "Get all the probes associated with a given sled.", - "operationId": "probes_get", - "parameters": [ - { - "in": "path", - "name": "sled", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, - { - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - }, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, - { - "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/IdSortMode" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "title": "Array_of_ProbeInfo", - "type": "array", - "items": { - "$ref": "#/components/schemas/ProbeInfo" - } - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - }, - "x-dropshot-pagination": { - "required": [] - } - } - }, - "/quiesce": { - "get": { - "summary": "Check whether Nexus is running normally, quiescing, or fully quiesced.", - "operationId": "quiesce_get", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QuiesceStatus" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "post": { - "summary": "Begin quiescing this Nexus instance", - "description": "This causes no new sagas to be started and eventually causes no database connections to become available. This is a one-way trip. 
There's no unquiescing Nexus.", - "operationId": "quiesce_start", - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/racks/{rack_id}/initialization-complete": { - "put": { - "summary": "Report that the Rack Setup Service initialization is complete", - "description": "See RFD 278 for more details.", - "operationId": "rack_initialization_complete", - "parameters": [ - { - "in": "path", - "name": "rack_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RackInitializationRequest" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/sagas": { - "get": { - "summary": "List sagas", - "operationId": "saga_list", - "parameters": [ - { - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - }, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, - { - "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/IdSortMode" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SagaResultsPage" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - }, - "x-dropshot-pagination": { - "required": [] - } - } - }, - "/sagas/{saga_id}": { - "get": { - "summary": "Fetch a saga", - "operationId": "saga_view", - "parameters": [ - { - "in": "path", - "name": "saga_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Saga" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/sled-agents/{sled_id}": { - "get": { - "summary": "Return information about the given sled agent", - "operationId": "sled_agent_get", - "parameters": [ - { - "in": "path", - "name": "sled_id", - "required": true, - "schema": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SledAgentInfo" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "post": { - "summary": "Report that the sled agent for the specified sled has come online.", - "operationId": "sled_agent_put", - "parameters": [ - { - "in": "path", - "name": "sled_id", - "required": true, - "schema": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SledAgentInfo" - } - } - }, 
- "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/sled-agents/{sled_id}/firewall-rules-update": { - "post": { - "summary": "Request a new set of firewall rules for a sled.", - "description": "This causes Nexus to read the latest set of rules for the sled, and call a Sled endpoint which applies the rules to all OPTE ports that happen to exist.", - "operationId": "sled_firewall_rules_request", - "parameters": [ - { - "in": "path", - "name": "sled_id", - "required": true, - "schema": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - } - } - ], - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/sleds/add": { - "post": { - "summary": "Add sled to initialized rack", - "operationId": "sled_add", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UninitializedSledId" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "successful creation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SledId" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/sleds/expunge": { - "post": { - "summary": "Mark a sled as expunged", - "description": "This is an irreversible process! It should only be called after sufficient warning to the operator.\n\nThis is idempotent, and it returns the old policy of the sled.", - "operationId": "sled_expunge", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SledSelector" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SledPolicy" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/sleds/uninitialized": { - "get": { - "summary": "List uninitialized sleds", - "operationId": "sled_list_uninitialized", - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UninitializedSledResultsPage" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/switch/{switch_id}": { - "put": { - "operationId": "switch_put", - "parameters": [ - { - "in": "path", - "name": "switch_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SwitchPutRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SwitchPutResponse" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/v1/ping": { - "get": { - "summary": "Ping API", - "description": "Always responds with Ok if it responds at all.", - "operationId": "ping", - "responses": { - 
"200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Ping" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/vmms/{propolis_id}": { - "put": { - "summary": "Report updated state for a VMM.", - "operationId": "cpapi_instances_put", - "parameters": [ - { - "in": "path", - "name": "propolis_id", - "required": true, - "schema": { - "$ref": "#/components/schemas/TypedUuidForPropolisKind" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SledVmmState" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, - "/volume/{volume_id}/remove-read-only-parent": { - "post": { - "summary": "Request removal of a read_only_parent from a volume.", - "description": "A volume can be created with the source data for that volume being another volume that attached as a \"read_only_parent\". In the background there exists a scrubber that will copy the data from the read_only_parent into the volume. When that scrubber has completed copying the data, this endpoint can be called to update the database that the read_only_parent is no longer needed for a volume and future attachments of this volume should not include that read_only_parent.", - "operationId": "cpapi_volume_remove_read_only_parent", - "parameters": [ - { - "in": "path", - "name": "volume_id", - "required": true, - "schema": { - "$ref": "#/components/schemas/TypedUuidForVolumeKind" - } - } - ], - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - } - }, - "components": { - "schemas": { - "ActivationReason": { - "description": "Describes why a background task was activated\n\nThis is only used for debugging. This is deliberately not made available to the background task itself. See \"Design notes\" in the module-level documentation for details.", - "type": "string", - "enum": [ - "signaled", - "timeout", - "dependency" - ] - }, - "AllowedSourceIps": { - "description": "Description of source IPs allowed to reach rack services.", - "oneOf": [ - { - "description": "Allow traffic from any external IP address.", - "type": "object", - "properties": { - "allow": { - "type": "string", - "enum": [ - "any" - ] - } - }, - "required": [ - "allow" - ] - }, - { - "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", - "type": "object", - "properties": { - "allow": { - "type": "string", - "enum": [ - "list" - ] - }, - "ips": { - "type": "array", - "items": { - "$ref": "#/components/schemas/IpNet" - } - } - }, - "required": [ - "allow", - "ips" - ] - } - ] - }, - "ArtifactVersion": { - "description": "An artifact version.\n\nThis is a freeform identifier with some basic validation. It may be the serialized form of a semver version, or a custom identifier that uses the same character set as a semver, plus `_`.\n\nThe exact pattern accepted is `^[a-zA-Z0-9._+-]{1,63}$`.\n\n# Ord implementation\n\n`ArtifactVersion`s are not intended to be sorted, just compared for equality. 
`ArtifactVersion` implements `Ord` only for storage within sorted collections.", - "type": "string", - "pattern": "^[a-zA-Z0-9._+-]{1,63}$" - }, - "BackgroundTask": { - "description": "Background tasks\n\nThese are currently only intended for observability by developers. We will eventually want to flesh this out into something more observable for end users.", - "type": "object", - "properties": { - "current": { - "description": "Describes the current task status", - "allOf": [ - { - "$ref": "#/components/schemas/CurrentStatus" - } - ] - }, - "description": { - "description": "brief summary (for developers) of what this task does", - "type": "string" - }, - "last": { - "description": "Describes the last completed activation", - "allOf": [ - { - "$ref": "#/components/schemas/LastResult" - } - ] - }, - "name": { - "description": "unique identifier for this background task", - "type": "string" - }, - "period": { - "description": "how long after an activation completes before another will be triggered automatically\n\n(activations can also be triggered for other reasons)", - "allOf": [ - { - "$ref": "#/components/schemas/Duration" - } - ] - } - }, - "required": [ - "current", - "description", - "last", - "name", - "period" - ] - }, - "BackgroundTasksActivateRequest": { - "description": "Query parameters for Background Task activation requests.", - "type": "object", - "properties": { - "bgtask_names": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "required": [ - "bgtask_names" - ] - }, - "Baseboard": { - "description": "Properties that uniquely identify an Oxide hardware component", - "type": "object", - "properties": { - "part": { - "type": "string" - }, - "revision": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "serial": { - "type": "string" - } - }, - "required": [ - "part", - "revision", - "serial" - ] - }, - "BaseboardId": { - "description": "A unique baseboard id found during a collection\n\nBaseboard ids are the keys used to link up information from disparate sources (like a service processor and a sled agent).\n\nThese are normalized in the database. Each distinct baseboard id is assigned a uuid and shared across the many possible collections that reference it.\n\nUsually, the part number and serial number are combined with a revision number. We do not include that here. 
If we ever did find a baseboard with the same part number and serial number but a new revision number, we'd want to treat that as the same baseboard as one with a different revision number.", - "type": "object", - "properties": { - "part_number": { - "description": "Oxide Part Number", - "type": "string" - }, - "serial_number": { - "description": "Serial number (unique for a given part number)", - "type": "string" - } - }, - "required": [ - "part_number", - "serial_number" - ] - }, - "BfdMode": { - "description": "BFD connection mode.", - "type": "string", - "enum": [ - "single_hop", - "multi_hop" - ] - }, - "BfdPeerConfig": { - "type": "object", - "properties": { - "detection_threshold": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "local": { - "nullable": true, - "type": "string", - "format": "ip" - }, - "mode": { - "$ref": "#/components/schemas/BfdMode" - }, - "remote": { - "type": "string", - "format": "ip" - }, - "required_rx": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "switch": { - "$ref": "#/components/schemas/SwitchLocation" - } - }, - "required": [ - "detection_threshold", - "mode", - "remote", - "required_rx", - "switch" - ] - }, - "BgpConfig": { - "type": "object", - "properties": { - "asn": { - "description": "The autonomous system number for the BGP configuration.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "checker": { - "nullable": true, - "description": "Checker to apply to incoming messages.", - "default": null, - "type": "string" - }, - "originate": { - "description": "The set of prefixes for the BGP router to originate.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Ipv4Net" - } - }, - "shaper": { - "nullable": true, - "description": "Shaper to apply to outgoing messages.", - "default": null, - "type": "string" - } - }, - "required": [ - "asn", - "originate" - ] - }, - "BgpPeerConfig": { - "type": "object", - "properties": { - "addr": { - "description": "Address of the peer.", - "type": "string", - "format": "ipv4" - }, - "allowed_export": { - "description": "Define export policy for a peer.", - "default": { - "type": "no_filtering" - }, - "allOf": [ - { - "$ref": "#/components/schemas/ImportExportPolicy" - } - ] - }, - "allowed_import": { - "description": "Define import policy for a peer.", - "default": { - "type": "no_filtering" - }, - "allOf": [ - { - "$ref": "#/components/schemas/ImportExportPolicy" - } - ] - }, - "asn": { - "description": "The autonomous system number of the router the peer belongs to.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "communities": { - "description": "Include the provided communities in updates sent to the peer.", - "default": [], - "type": "array", - "items": { - "type": "integer", - "format": "uint32", - "minimum": 0 - } - }, - "connect_retry": { - "nullable": true, - "description": "The interval in seconds between peer connection retry attempts.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "delay_open": { - "nullable": true, - "description": "How long to delay sending open messages to a peer. In seconds.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "enforce_first_as": { - "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", - "default": false, - "type": "boolean" - }, - "hold_time": { - "nullable": true, - "description": "How long to keep a session alive without a keepalive in seconds. 
Defaults to 6.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "idle_hold_time": { - "nullable": true, - "description": "How long to keep a peer in idle after a state machine reset in seconds.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "keepalive": { - "nullable": true, - "description": "The interval to send keepalive messages at.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "local_pref": { - "nullable": true, - "description": "Apply a local preference to routes received from this peer.", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "md5_auth_key": { - "nullable": true, - "description": "Use the given key for TCP-MD5 authentication with the peer.", - "default": null, - "type": "string" - }, - "min_ttl": { - "nullable": true, - "description": "Require messages from a peer have a minimum IP time to live field.", - "default": null, - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "multi_exit_discriminator": { - "nullable": true, - "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "port": { - "description": "Switch port the peer is reachable on.", - "type": "string" - }, - "remote_asn": { - "nullable": true, - "description": "Require that a peer has a specified ASN.", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "vlan_id": { - "nullable": true, - "description": "Associate a VLAN ID with a BGP peer session.", - "default": null, - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "addr", - "asn", - "port" - ] - }, - "Blueprint": { - "description": "Describes a complete set of software and configuration for the system", - "type": "object", - "properties": { - "clickhouse_cluster_config": { - "nullable": true, - "description": "Allocation of Clickhouse Servers and Keepers for replicated clickhouse setups. 
This is set to `None` if replicated clickhouse is not in use.", - "allOf": [ - { - "$ref": "#/components/schemas/ClickhouseClusterConfig" - } - ] - }, - "cockroachdb_fingerprint": { - "description": "CockroachDB state fingerprint when this blueprint was created", - "type": "string" - }, - "cockroachdb_setting_preserve_downgrade": { - "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to", - "allOf": [ - { - "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" - } - ] - }, - "comment": { - "description": "human-readable string describing why this blueprint was created (for debugging)", - "type": "string" - }, - "creator": { - "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", - "type": "string" - }, - "external_dns_version": { - "description": "external DNS version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "id": { - "description": "unique identifier for this blueprint", - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" - } - ] - }, - "internal_dns_version": { - "description": "internal DNS version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "nexus_generation": { - "description": "The generation of the active group of Nexuses\n\nIf a Nexus instance notices it has a nexus_generation less than this value, it will start to quiesce in preparation for handing off control to the newer generation (see: RFD 588).", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "oximeter_read_mode": { - "description": "Whether oximeter should read from a single node or a cluster", - "allOf": [ - { - "$ref": "#/components/schemas/OximeterReadMode" - } - ] - }, - "oximeter_read_version": { - "description": "Oximeter read policy version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "parent_blueprint_id": { - "nullable": true, - "description": "which blueprint this blueprint is based on", - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" - } - ] - }, - "pending_mgs_updates": { - "description": "List of pending MGS-mediated updates", - "allOf": [ - { - "$ref": "#/components/schemas/PendingMgsUpdates" - } - ] - }, - "sleds": { - "description": "A map of sled id -> desired configuration of the sled.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BlueprintSledConfig" - } - }, - "source": { - "description": "Source of this blueprint (can include planning report)", - "allOf": [ - { - "$ref": "#/components/schemas/BlueprintSource" - } - ] - }, - "target_release_minimum_generation": { - "description": "The minimum release generation to accept for target release configuration. Target release configuration with a generation less than this number will be ignored.\n\nFor example, let's say that the current target release generation is 5. Then, when reconfigurator detects a MUPdate:\n\n* the target release is ignored in favor of the install dataset * this field is set to 6\n\nOnce an operator sets a new target release, its generation will be 6 or higher. 
Reconfigurator will then know that it is back in charge of driving the system to the target release.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "time_created": { - "description": "when this blueprint was generated (for debugging)", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "cockroachdb_fingerprint", - "cockroachdb_setting_preserve_downgrade", - "comment", - "creator", - "external_dns_version", - "id", - "internal_dns_version", - "nexus_generation", - "oximeter_read_mode", - "oximeter_read_version", - "pending_mgs_updates", - "sleds", - "source", - "target_release_minimum_generation", - "time_created" - ] - }, - "BlueprintArtifactVersion": { - "description": "The version of an artifact in a blueprint.\n\nThis is used for debugging output.", - "oneOf": [ - { - "description": "A specific version of the image is available.", - "type": "object", - "properties": { - "artifact_version": { - "type": "string", - "enum": [ - "available" - ] - }, - "version": { - "$ref": "#/components/schemas/ArtifactVersion" - } - }, - "required": [ - "artifact_version", - "version" - ] - }, - { - "description": "The version could not be determined. This is non-fatal.", - "type": "object", - "properties": { - "artifact_version": { - "type": "string", - "enum": [ - "unknown" - ] - } - }, - "required": [ - "artifact_version" - ] - } - ] - }, - "BlueprintDatasetConfig": { - "description": "Information about a dataset as recorded in a blueprint", - "type": "object", - "properties": { - "address": { - "nullable": true, - "type": "string" - }, - "compression": { - "$ref": "#/components/schemas/CompressionAlgorithm" - }, - "disposition": { - "$ref": "#/components/schemas/BlueprintDatasetDisposition" - }, - "id": { - "$ref": "#/components/schemas/TypedUuidForDatasetKind" - }, - "kind": { - "$ref": "#/components/schemas/DatasetKind" - }, - "pool": { - "$ref": "#/components/schemas/ZpoolName" - }, - "quota": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] - }, - "reservation": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] - } - }, - "required": [ - "compression", - "disposition", - "id", - "kind", - "pool" - ] - }, - "BlueprintDatasetDisposition": { - "description": "The desired state of an Omicron-managed dataset in a blueprint.\n\nPart of [`BlueprintDatasetConfig`].", - "oneOf": [ - { - "description": "The dataset is in-service.", - "type": "string", - "enum": [ - "in_service" - ] - }, - { - "description": "The dataset is permanently gone.", - "type": "string", - "enum": [ - "expunged" - ] - } - ] - }, - "BlueprintHostPhase2DesiredContents": { - "description": "Describes the desired contents of a host phase 2 slot (i.e., the boot partition on one of the internal M.2 drives).\n\nThis is the blueprint version of [`HostPhase2DesiredContents`].", - "oneOf": [ - { - "description": "Do not change the current contents.\n\nWe use this value when we've detected a sled has been mupdated (and we don't want to overwrite phase 2 images until we understand how to recover from that mupdate) and as the default value when reading a blueprint that was ledgered before this concept existed.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "current_contents" - ] - } - }, - "required": [ - "type" - ] - }, - { - "description": "Set the phase 2 slot to the given artifact.\n\nThe artifact will come from an unpacked and distributed TUF repo.", - "type": "object", - 
"properties": { - "hash": { - "type": "string", - "format": "hex string (32 bytes)" - }, - "type": { - "type": "string", - "enum": [ - "artifact" - ] - }, - "version": { - "$ref": "#/components/schemas/BlueprintArtifactVersion" - } - }, - "required": [ - "hash", - "type", - "version" - ] - } - ] - }, - "BlueprintHostPhase2DesiredSlots": { - "description": "Describes the desired contents for both host phase 2 slots.\n\nThis is the blueprint version of [`HostPhase2DesiredSlots`].", - "type": "object", - "properties": { - "slot_a": { - "$ref": "#/components/schemas/BlueprintHostPhase2DesiredContents" - }, - "slot_b": { - "$ref": "#/components/schemas/BlueprintHostPhase2DesiredContents" - } - }, - "required": [ - "slot_a", - "slot_b" - ] - }, - "BlueprintMetadata": { - "description": "Describe high-level metadata about a blueprint", - "type": "object", - "properties": { - "cockroachdb_fingerprint": { - "description": "CockroachDB state fingerprint when this blueprint was created", - "type": "string" - }, - "cockroachdb_setting_preserve_downgrade": { - "nullable": true, - "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to (`None` if this value was retrieved from the database and was invalid)", - "allOf": [ - { - "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" - } - ] - }, - "comment": { - "description": "human-readable string describing why this blueprint was created (for debugging)", - "type": "string" - }, - "creator": { - "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", - "type": "string" - }, - "external_dns_version": { - "description": "external DNS version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "id": { - "description": "unique identifier for this blueprint", - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" - } - ] - }, - "internal_dns_version": { - "description": "internal DNS version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "nexus_generation": { - "description": "The Nexus generation number\n\nSee [`Blueprint::nexus_generation`].", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "parent_blueprint_id": { - "nullable": true, - "description": "which blueprint this blueprint is based on", - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" - } - ] - }, - "source": { - "description": "source of the blueprint (for debugging)", - "allOf": [ - { - "$ref": "#/components/schemas/BlueprintSource" - } - ] - }, - "target_release_minimum_generation": { - "description": "The minimum generation for the target release.\n\nSee [`Blueprint::target_release_minimum_generation`].", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "time_created": { - "description": "when this blueprint was generated (for debugging)", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "cockroachdb_fingerprint", - "comment", - "creator", - "external_dns_version", - "id", - "internal_dns_version", - "nexus_generation", - "source", - "target_release_minimum_generation", - "time_created" - ] - }, - "BlueprintMetadataResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": 
"#/components/schemas/BlueprintMetadata" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] - }, - "BlueprintPhysicalDiskConfig": { - "description": "Information about an Omicron physical disk as recorded in a bluerprint.", - "type": "object", - "properties": { - "disposition": { - "$ref": "#/components/schemas/BlueprintPhysicalDiskDisposition" - }, - "id": { - "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" - }, - "identity": { - "$ref": "#/components/schemas/DiskIdentity" - }, - "pool_id": { - "$ref": "#/components/schemas/TypedUuidForZpoolKind" - } - }, - "required": [ - "disposition", - "id", - "identity", - "pool_id" - ] - }, - "BlueprintPhysicalDiskDisposition": { - "description": "The desired state of an Omicron-managed physical disk in a blueprint.", - "oneOf": [ - { - "description": "The physical disk is in-service.", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": [ - "in_service" - ] - } - }, - "required": [ - "kind" - ] - }, - { - "description": "The physical disk is permanently gone.", - "type": "object", - "properties": { - "as_of_generation": { - "description": "Generation of the parent config in which this disk became expunged.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "kind": { - "type": "string", - "enum": [ - "expunged" - ] - }, - "ready_for_cleanup": { - "description": "True if Reconfiguration knows that this disk has been expunged.\n\nIn the current implementation, this means either:\n\na) the sled where the disk was residing has been expunged.\n\nb) the planner has observed an inventory collection where the disk expungement was seen by the sled agent on the sled where the disk was previously in service. This is indicated by the inventory reporting a disk generation at least as high as `as_of_generation`.", - "type": "boolean" - } - }, - "required": [ - "as_of_generation", - "kind", - "ready_for_cleanup" - ] - } - ] - }, - "BlueprintSledConfig": { - "description": "Information about the configuration of a sled as recorded in a blueprint.\n\nPart of [`Blueprint`].", - "type": "object", - "properties": { - "datasets": { - "$ref": "#/components/schemas/IdMapBlueprintDatasetConfig" - }, - "disks": { - "$ref": "#/components/schemas/IdMapBlueprintPhysicalDiskConfig" - }, - "host_phase_2": { - "$ref": "#/components/schemas/BlueprintHostPhase2DesiredSlots" - }, - "remove_mupdate_override": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForMupdateOverrideKind" - } - ] - }, - "sled_agent_generation": { - "description": "Generation number used when this type is converted into an `OmicronSledConfig` for use by sled-agent.\n\nThis field is explicitly named `sled_agent_generation` to indicate that it is only required to cover information that changes what Reconfigurator sends to sled agent. 
For example, changing the sled `state` from `Active` to `Decommissioned` would not require a bump to `sled_agent_generation`, because a `Decommissioned` sled will never be sent an `OmicronSledConfig`.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "state": { - "$ref": "#/components/schemas/SledState" - }, - "zones": { - "$ref": "#/components/schemas/IdMapBlueprintZoneConfig" - } - }, - "required": [ - "datasets", - "disks", - "host_phase_2", - "sled_agent_generation", - "state", - "zones" - ] - }, - "BlueprintSource": { - "description": "Description of the source of a blueprint.", - "oneOf": [ - { - "description": "The initial blueprint created by the rack setup service.", - "type": "object", - "properties": { - "source": { - "type": "string", - "enum": [ - "rss" - ] - } - }, - "required": [ - "source" - ] - }, - { - "description": "A blueprint created by the planner, and we still have the associated planning report.", - "type": "object", - "properties": { - "add": { - "$ref": "#/components/schemas/PlanningAddStepReport" - }, - "cockroachdb_settings": { - "$ref": "#/components/schemas/PlanningCockroachdbSettingsStepReport" - }, - "decommission": { - "$ref": "#/components/schemas/PlanningDecommissionStepReport" - }, - "expunge": { - "$ref": "#/components/schemas/PlanningExpungeStepReport" - }, - "mgs_updates": { - "$ref": "#/components/schemas/PlanningMgsUpdatesStepReport" - }, - "nexus_generation_bump": { - "$ref": "#/components/schemas/PlanningNexusGenerationBumpReport" - }, - "noop_image_source": { - "$ref": "#/components/schemas/PlanningNoopImageSourceStepReport" - }, - "planner_config": { - "description": "The configuration in effect for this planning run.", - "allOf": [ - { - "$ref": "#/components/schemas/PlannerConfig" - } - ] - }, - "source": { - "type": "string", - "enum": [ - "planner" - ] - }, - "zone_updates": { - "$ref": "#/components/schemas/PlanningZoneUpdatesStepReport" - } - }, - "required": [ - "add", - "cockroachdb_settings", - "decommission", - "expunge", - "mgs_updates", - "nexus_generation_bump", - "noop_image_source", - "planner_config", - "source", - "zone_updates" - ] - }, - { - "description": "A blueprint created by the planner but loaded from the database, so we no longer have the associated planning report.", - "type": "object", - "properties": { - "source": { - "type": "string", - "enum": [ - "planner_loaded_from_database" - ] - } - }, - "required": [ - "source" - ] - }, - { - "description": "This blueprint was created by one of `reconfigurator-cli`'s blueprint editing subcommands.", - "type": "object", - "properties": { - "source": { - "type": "string", - "enum": [ - "reconfigurator_cli_edit" - ] - } - }, - "required": [ - "source" - ] - }, - { - "description": "This blueprint was constructed by hand by an automated test.", - "type": "object", - "properties": { - "source": { - "type": "string", - "enum": [ - "test" - ] - } - }, - "required": [ - "source" - ] - } - ] - }, - "BlueprintTarget": { - "description": "Describes what blueprint, if any, the system is currently working toward", - "type": "object", - "properties": { - "enabled": { - "description": "policy: should the system actively work towards this blueprint\n\nThis should generally be left enabled.", - "type": "boolean" - }, - "target_id": { - "description": "id of the blueprint that the system is trying to make real", - "allOf": [ - { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" - } - ] - }, - "time_made_target": { - "description": "when this blueprint was 
made the target", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "enabled", - "target_id", - "time_made_target" - ] - }, - "BlueprintTargetSet": { - "description": "Specifies what blueprint, if any, the system should be working toward", - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "target_id": { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" - } - }, - "required": [ - "enabled", - "target_id" - ] - }, - "BlueprintZoneConfig": { - "description": "Describes one Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintSledConfig`].", - "type": "object", - "properties": { - "disposition": { - "description": "The disposition (desired state) of this zone recorded in the blueprint.", - "allOf": [ - { - "$ref": "#/components/schemas/BlueprintZoneDisposition" - } - ] - }, - "filesystem_pool": { - "description": "zpool used for the zone's (transient) root filesystem", - "allOf": [ - { - "$ref": "#/components/schemas/ZpoolName" - } - ] - }, - "id": { - "$ref": "#/components/schemas/TypedUuidForOmicronZoneKind" - }, - "image_source": { - "$ref": "#/components/schemas/BlueprintZoneImageSource" - }, - "zone_type": { - "$ref": "#/components/schemas/BlueprintZoneType" - } - }, - "required": [ - "disposition", - "filesystem_pool", - "id", - "image_source", - "zone_type" - ] - }, - "BlueprintZoneDisposition": { - "description": "The desired state of an Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZoneConfig`].", - "oneOf": [ - { - "description": "The zone is in-service.", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": [ - "in_service" - ] - } - }, - "required": [ - "kind" - ] - }, - { - "description": "The zone is permanently gone.", - "type": "object", - "properties": { - "as_of_generation": { - "description": "Generation of the parent config in which this zone became expunged.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "kind": { - "type": "string", - "enum": [ - "expunged" - ] - }, - "ready_for_cleanup": { - "description": "True if Reconfiguration knows that this zone has been shut down and will not be restarted.\n\nIn the current implementation, this means the planner has observed an inventory collection where the sled on which this zone was running (a) is no longer running the zone and (b) has a config generation at least as high as `as_of_generation`, indicating it will not try to start the zone on a cold boot based on an older config.", - "type": "boolean" - } - }, - "required": [ - "as_of_generation", - "kind", - "ready_for_cleanup" - ] - } - ] - }, - "BlueprintZoneImageSource": { - "description": "Where the zone's image source is located.\n\nThis is the blueprint version of [`OmicronZoneImageSource`].", - "oneOf": [ - { - "description": "This zone's image source is whatever happens to be on the sled's \"install\" dataset.\n\nThis is whatever was put in place at the factory or by the latest MUPdate. The image used here can vary by sled and even over time (if the sled gets MUPdated again).\n\nHistorically, this was the only source for zone images. 
In an system with automated control-plane-driven update we expect to only use this variant in emergencies where the system had to be recovered via MUPdate.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "install_dataset" - ] - } - }, - "required": [ - "type" - ] - }, - { - "description": "This zone's image source is the artifact matching this hash from the TUF artifact store (aka \"TUF repo depot\").\n\nThis originates from TUF repos uploaded to Nexus which are then replicated out to all sleds.", - "type": "object", - "properties": { - "hash": { - "type": "string", - "format": "hex string (32 bytes)" - }, - "type": { - "type": "string", - "enum": [ - "artifact" - ] - }, - "version": { - "$ref": "#/components/schemas/BlueprintArtifactVersion" - } - }, - "required": [ - "hash", - "type", - "version" - ] - } - ] - }, - "BlueprintZoneType": { - "oneOf": [ - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "nullable": true, - "type": "string" - }, - "external_ip": { - "$ref": "#/components/schemas/OmicronZoneExternalSnatIp" - }, - "nic": { - "description": "The service vNIC providing outbound connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "type": { - "type": "string", - "enum": [ - "boundary_ntp" - ] - } - }, - "required": [ - "address", - "dns_servers", - "external_ip", - "nic", - "ntp_servers", - "type" - ] - }, - { - "description": "Used in single-node clickhouse setups", - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "description": "Used in replicated clickhouse setups", - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "clickhouse_server" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "cockroach_db" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "crucible_pantry" - ] - } - }, - "required": [ - "address", - "type" - ] - }, + } + }, + "components": { + "schemas": { + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack 
services.", + "oneOf": [ { + "description": "Allow traffic from any external IP address.", "type": "object", "properties": { - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "dns_address": { - "description": "The address at which the external DNS server is reachable.", - "allOf": [ - { - "$ref": "#/components/schemas/OmicronZoneExternalFloatingAddr" - } - ] - }, - "http_address": { - "description": "The address at which the external DNS server API is reachable.", - "type": "string" - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "type": { + "allow": { "type": "string", "enum": [ - "external_dns" + "any" ] } }, "required": [ - "dataset", - "dns_address", - "http_address", - "nic", - "type" + "allow" ] }, { + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", "type": "object", "properties": { - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "dns_address": { - "type": "string" - }, - "gz_address": { - "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", - "type": "string", - "format": "ipv6" - }, - "gz_address_index": { - "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "http_address": { - "type": "string" - }, - "type": { + "allow": { "type": "string", "enum": [ - "internal_dns" + "list" ] - } - }, - "required": [ - "dataset", - "dns_address", - "gz_address", - "gz_address_index", - "http_address", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" }, - "type": { - "type": "string", - "enum": [ - "internal_ntp" - ] - } - }, - "required": [ - "address", - "type" - ] - }, - { - "type": "object", - "properties": { - "external_dns_servers": { - "description": "External DNS servers Nexus can use to resolve external hosts.", + "ips": { "type": "array", "items": { - "type": "string", - "format": "ip" + "$ref": "#/components/schemas/IpNet" } - }, - "external_ip": { - "description": "The address at which the external nexus server is reachable.", - "allOf": [ - { - "$ref": "#/components/schemas/OmicronZoneExternalFloatingIp" - } - ] - }, - "external_tls": { - "description": "Whether Nexus's external endpoint should use TLS", - "type": "boolean" - }, - "internal_address": { - "description": "The address at which the internal nexus server is reachable.", - "type": "string" - }, - "lockstep_port": { - "description": "The port at which the lockstep server is reachable. This shares the same IP address with `internal_address`.", - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "nexus_generation": { - "description": "Generation number for this Nexus zone. This is used to coordinate handoff between old and new Nexus instances during updates. 
See RFD 588.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "nexus" - ] } }, "required": [ - "external_dns_servers", - "external_ip", - "external_tls", - "internal_address", - "lockstep_port", - "nexus_generation", - "nic", - "type" + "allow", + "ips" ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "oximeter" - ] - } + } + ] + }, + "ArtifactVersion": { + "description": "An artifact version.\n\nThis is a freeform identifier with some basic validation. It may be the serialized form of a semver version, or a custom identifier that uses the same character set as a semver, plus `_`.\n\nThe exact pattern accepted is `^[a-zA-Z0-9._+-]{1,63}$`.\n\n# Ord implementation\n\n`ArtifactVersion`s are not intended to be sorted, just compared for equality. `ArtifactVersion` implements `Ord` only for storage within sorted collections.", + "type": "string", + "pattern": "^[a-zA-Z0-9._+-]{1,63}$" + }, + "BackgroundTasksActivateRequest": { + "description": "Query parameters for Background Task activation requests.", + "type": "object", + "properties": { + "bgtask_names": { + "type": "array", + "items": { + "type": "string" }, - "required": [ - "address", - "type" - ] + "uniqueItems": true } + }, + "required": [ + "bgtask_names" ] }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "Certificate": { + "Baseboard": { + "description": "Properties that uniquely identify an Oxide hardware component", "type": "object", "properties": { - "cert": { + "part": { "type": "string" }, - "key": { + "revision": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "serial": { "type": "string" } }, "required": [ - "cert", - "key" + "part", + "revision", + "serial" ] }, - "ClickhouseClusterConfig": { - "description": "Global configuration for all clickhouse servers (replicas) and keepers", + "BaseboardId": { + "description": "A unique baseboard id found during a collection\n\nBaseboard ids are the keys used to link up information from disparate sources (like a service processor and a sled agent).\n\nThese are normalized in the database. Each distinct baseboard id is assigned a uuid and shared across the many possible collections that reference it.\n\nUsually, the part number and serial number are combined with a revision number. We do not include that here. 
If we ever did find a baseboard with the same part number and serial number but a new revision number, we'd want to treat that as the same baseboard as one with a different revision number.", "type": "object", "properties": { - "cluster_name": { - "description": "An arbitrary name for the Clickhouse cluster shared by all nodes", + "part_number": { + "description": "Oxide Part Number", "type": "string" }, - "cluster_secret": { - "description": "An arbitrary string shared by all nodes used at runtime to determine whether nodes are part of the same cluster.", + "serial_number": { + "description": "Serial number (unique for a given part number)", "type": "string" + } + }, + "required": [ + "part_number", + "serial_number" + ] + }, + "BfdMode": { + "description": "BFD connection mode.", + "type": "string", + "enum": [ + "single_hop", + "multi_hop" + ] + }, + "BfdPeerConfig": { + "type": "object", + "properties": { + "detection_threshold": { + "type": "integer", + "format": "uint8", + "minimum": 0 }, - "generation": { - "description": "The last update to the clickhouse cluster configuration\n\nThis is used by `clickhouse-admin` in the clickhouse server and keeper zones to discard old configurations.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] + "local": { + "nullable": true, + "type": "string", + "format": "ip" }, - "highest_seen_keeper_leader_committed_log_index": { - "description": "This is used as a marker to tell if the raft configuration in a new inventory collection is newer than the last collection. This serves as a surrogate for the log index of the last committed configuration, which clickhouse keeper doesn't expose.\n\nThis is necesssary because during inventory collection we poll multiple keeper nodes, and each returns their local knowledge of the configuration. But we may reach different nodes in different attempts, and some nodes in a following attempt may reflect stale configuration. Due to timing, we can always query old information. That is just normal polling. However, we never want to use old configuration if we have already seen and acted on newer configuration.", + "mode": { + "$ref": "#/components/schemas/BfdMode" + }, + "remote": { + "type": "string", + "format": "ip" + }, + "required_rx": { "type": "integer", "format": "uint64", "minimum": 0 }, - "keepers": { - "description": "The desired state of the clickhouse keeper cluster\n\nWe decouple deployment of zones that should contain clickhouse keeper processes from actually starting or stopping those processes, adding or removing them to/from the keeper cluster, and reconfiguring other keeper and clickhouse server nodes to reflect the new configuration.\n\nAs part of this decoupling, we keep track of the intended zone deployment in the blueprint, but that is not enough to track the desired state of the keeper cluster. We are only allowed to add or remove one keeper node at a time, and therefore we must track the desired state of the keeper cluster which may change multiple times until the keepers in the cluster match the deployed zones. An example may help:\n\n1. We start with 3 keeper nodes in 3 deployed keeper zones and need to add two to reach our desired policy of 5 keepers 2. The planner adds 2 new keeper zones to the blueprint 3. The planner will also add **one** new keeper to the `keepers` field below that matches one of the deployed zones. 4. 
The executor will start the new keeper process that was added to the `keepers` field, attempt to add it to the keeper cluster by pushing configuration updates to the other keepers, and then updating the clickhouse server configurations to know about the new keeper. 5. If the keeper is successfully added, as reflected in inventory, then steps 3 and 4 above will be repeated for the next keeper process. 6. If the keeper is not successfully added by the executor it will continue to retry indefinitely. 7. If the zone is expunged while the planner has it as part of its desired state in `keepers`, and the executor is trying to add it, the keeper will be removed from `keepers` in the next blueprint. If it has been added to the actual cluster by an executor in the meantime it will be removed on the next iteration of an executor.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/KeeperId" + "switch": { + "$ref": "#/components/schemas/SwitchLocation" + } + }, + "required": [ + "detection_threshold", + "mode", + "remote", + "required_rx", + "switch" + ] + }, + "BgpConfig": { + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number for the BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "checker": { + "nullable": true, + "description": "Checker to apply to incoming messages.", + "default": null, + "type": "string" + }, + "originate": { + "description": "The set of prefixes for the BGP router to originate.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Ipv4Net" } }, - "max_used_keeper_id": { - "description": "Clickhouse Keeper IDs must be unique and are handed out monotonically. Keep track of the last used one.", + "shaper": { + "nullable": true, + "description": "Shaper to apply to outgoing messages.", + "default": null, + "type": "string" + } + }, + "required": [ + "asn", + "originate" + ] + }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "addr": { + "description": "Address of the peer.", + "type": "string", + "format": "ipv4" + }, + "allowed_export": { + "description": "Define export policy for a peer.", + "default": { + "type": "no_filtering" + }, "allOf": [ { - "$ref": "#/components/schemas/KeeperId" + "$ref": "#/components/schemas/ImportExportPolicy" } ] }, - "max_used_server_id": { - "description": "Clickhouse Server IDs must be unique and are handed out monotonically. 
Keep track of the last used one.", + "allowed_import": { + "description": "Define import policy for a peer.", + "default": { + "type": "no_filtering" + }, "allOf": [ { - "$ref": "#/components/schemas/ServerId" + "$ref": "#/components/schemas/ImportExportPolicy" } ] }, - "servers": { - "description": "The desired state of clickhouse server processes on the rack\n\nClickhouse servers do not have the same limitations as keepers and can be deployed all at once.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ServerId" + "asn": { + "description": "The autonomous system number of the router the peer belongs to.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "default": [], + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 } - } - }, - "required": [ - "cluster_name", - "cluster_secret", - "generation", - "highest_seen_keeper_leader_committed_log_index", - "keepers", - "max_used_keeper_id", - "max_used_server_id", - "servers" - ] - }, - "ClickhouseMode": { - "description": "How to deploy clickhouse nodes", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "single_node_only" - ] - } - }, - "required": [ - "type" - ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "cluster_only" - ] - }, - "value": { - "type": "object", - "properties": { - "target_keepers": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "target_servers": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - "required": [ - "target_keepers", - "target_servers" - ] - } - }, - "required": [ - "type", - "value" - ] + "connect_retry": { + "nullable": true, + "description": "The interval in seconds between peer connection retry attempts.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "delay_open": { + "nullable": true, + "description": "How long to delay sending open messages to a peer. In seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "default": false, + "type": "boolean" + }, + "hold_time": { + "nullable": true, + "description": "How long to keep a session alive without a keepalive in seconds. 
Defaults to 6.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "idle_hold_time": { + "nullable": true, + "description": "How long to keep a peer in idle after a state machine reset in seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "keepalive": { + "nullable": true, + "description": "The interval to send keepalive messages at.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "default": null, + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "default": null, + "type": "string" }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "both" - ] - }, - "value": { - "type": "object", - "properties": { - "target_keepers": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "target_servers": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - "required": [ - "target_keepers", - "target_servers" - ] - } - }, - "required": [ - "type", - "value" - ] - } - ] - }, - "ClickhousePolicy": { - "type": "object", - "properties": { - "mode": { - "$ref": "#/components/schemas/ClickhouseMode" + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "default": null, + "type": "integer", + "format": "uint8", + "minimum": 0 }, - "time_created": { - "type": "string", - "format": "date-time" + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "default": null, + "type": "integer", + "format": "uint32", + "minimum": 0 }, - "version": { + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "default": null, "type": "integer", "format": "uint32", "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a BGP peer session.", + "default": null, + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "mode", - "time_created", - "version" - ] - }, - "CockroachDbClusterVersion": { - "description": "CockroachDB cluster versions we are aware of.\n\nCockroachDB can be upgraded from one major version to the next, e.g. v22.1 -> v22.2. Each major version introduces changes in how it stores data on disk to support new features, and each major version has support for reading the previous version's data so that it can perform an upgrade. The version of the data format is called the \"cluster version\", which is distinct from but related to the software version that's being run.\n\nWhile software version v22.2 is using cluster version v22.1, it's possible to downgrade back to v22.1. Once the cluster version is upgraded, there's no going back.\n\nTo give us some time to evaluate new versions of the software while retaining a downgrade path, we currently deploy new versions of CockroachDB across two releases of the Oxide software, in a \"tick-tock\" model:\n\n- In \"tick\" releases, we upgrade the version of the CockroachDB software to a new major version, and update `CockroachDbClusterVersion::NEWLY_INITIALIZED`. 
On upgraded racks, the new version is running with the previous cluster version; on newly-initialized racks, the new version is running with the new cluser version. - In \"tock\" releases, we change `CockroachDbClusterVersion::POLICY` to the major version we upgraded to in the last \"tick\" release. This results in a new blueprint that upgrades the cluster version, destroying the downgrade path but allowing us to eventually upgrade to the next release.\n\nThese presently describe major versions of CockroachDB. The order of these must be maintained in the correct order (the first variant must be the earliest version).", - "type": "string", - "enum": [ - "V22_1" - ] - }, - "CockroachDbPreserveDowngrade": { - "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to.", - "oneOf": [ - { - "description": "Do not modify the setting.", - "type": "object", - "properties": { - "action": { - "type": "string", - "enum": [ - "do_not_modify" - ] - } - }, - "required": [ - "action" - ] - }, - { - "description": "Ensure the setting is set to an empty string.", - "type": "object", - "properties": { - "action": { - "type": "string", - "enum": [ - "allow_upgrade" - ] - } - }, - "required": [ - "action" - ] - }, - { - "description": "Ensure the setting is set to a given cluster version.", - "type": "object", - "properties": { - "action": { - "type": "string", - "enum": [ - "set" - ] - }, - "data": { - "$ref": "#/components/schemas/CockroachDbClusterVersion" - } - }, - "required": [ - "action", - "data" - ] - } + "addr", + "asn", + "port" ] }, - "CockroachdbUnsafeToShutdown": { - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "missing_live_nodes_stat" - ] + "Blueprint": { + "description": "Describes a complete set of software and configuration for the system", + "type": "object", + "properties": { + "clickhouse_cluster_config": { + "nullable": true, + "description": "Allocation of Clickhouse Servers and Keepers for replicated clickhouse setups. 
This is set to `None` if replicated clickhouse is not in use.", + "allOf": [ + { + "$ref": "#/components/schemas/ClickhouseClusterConfig" } - }, - "required": [ - "type" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "missing_underreplicated_stat" - ] - } - }, - "required": [ - "type" - ] + "cockroachdb_fingerprint": { + "description": "CockroachDB state fingerprint when this blueprint was created", + "type": "string" }, - { - "type": "object", - "properties": { - "live_nodes": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "not_enough_live_nodes" - ] + "cockroachdb_setting_preserve_downgrade": { + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to", + "allOf": [ + { + "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" } - }, - "required": [ - "live_nodes", - "type" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "not_enough_nodes" - ] - } - }, - "required": [ - "type" - ] + "comment": { + "description": "human-readable string describing why this blueprint was created (for debugging)", + "type": "string" }, - { - "type": "object", - "properties": { - "n": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "underreplicated_ranges" - ] - } - }, - "required": [ - "n", - "type" - ] - } - ] - }, - "CompletedAttempt": { - "description": "externally-exposed status for a completed attempt", - "type": "object", - "properties": { - "elapsed": { - "$ref": "#/components/schemas/Duration" + "creator": { + "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", + "type": "string" }, - "nattempts_done": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "external_dns_version": { + "description": "external DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] }, - "request": { - "$ref": "#/components/schemas/PendingMgsUpdate" + "id": { + "description": "unique identifier for this blueprint", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] }, - "result": { - "x-rust-type": { - "crate": "std", - "parameters": [ - { - "$ref": "#/components/schemas/UpdateCompletedHow" - }, - { - "type": "string" - } - ], - "path": "::std::result::Result", - "version": "*" - }, - "oneOf": [ + "internal_dns_version": { + "description": "internal DNS version when this blueprint was created", + "allOf": [ { - "type": "object", - "properties": { - "ok": { - "$ref": "#/components/schemas/UpdateCompletedHow" - } - }, - "required": [ - "ok" - ] - }, + "$ref": "#/components/schemas/Generation" + } + ] + }, + "nexus_generation": { + "description": "The generation of the active group of Nexuses\n\nIf a Nexus instance notices it has a nexus_generation less than this value, it will start to quiesce in preparation for handing off control to the newer generation (see: RFD 588).", + "allOf": [ { - "type": "object", - "properties": { - "err": { - "type": "string" - } - }, - "required": [ - "err" - ] + "$ref": "#/components/schemas/Generation" } ] }, - "time_done": { - "type": "string", - "format": "date-time" + "oximeter_read_mode": { + "description": "Whether oximeter should read from a single node or a cluster", + "allOf": [ + { + "$ref": "#/components/schemas/OximeterReadMode" + } + ] }, - 
"time_started": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "elapsed", - "nattempts_done", - "request", - "result", - "time_done", - "time_started" - ] - }, - "CompressionAlgorithm": { - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "on" - ] + "oximeter_read_version": { + "description": "Oximeter read policy version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" } - }, - "required": [ - "type" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "off" - ] + "parent_blueprint_id": { + "nullable": true, + "description": "which blueprint this blueprint is based on", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" } - }, - "required": [ - "type" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "gzip" - ] + "pending_mgs_updates": { + "description": "List of pending MGS-mediated updates", + "allOf": [ + { + "$ref": "#/components/schemas/PendingMgsUpdates" } - }, - "required": [ - "type" ] }, - { + "sleds": { + "description": "A map of sled id -> desired configuration of the sled.", "type": "object", - "properties": { - "level": { - "$ref": "#/components/schemas/GzipLevel" - }, - "type": { - "type": "string", - "enum": [ - "gzip_n" - ] + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintSledConfig" + } + }, + "source": { + "description": "Source of this blueprint (can include planning report)", + "allOf": [ + { + "$ref": "#/components/schemas/BlueprintSource" } - }, - "required": [ - "level", - "type" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "lz4" - ] + "target_release_minimum_generation": { + "description": "The minimum release generation to accept for target release configuration. Target release configuration with a generation less than this number will be ignored.\n\nFor example, let's say that the current target release generation is 5. Then, when reconfigurator detects a MUPdate:\n\n* the target release is ignored in favor of the install dataset * this field is set to 6\n\nOnce an operator sets a new target release, its generation will be 6 or higher. Reconfigurator will then know that it is back in charge of driving the system to the target release.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" } - }, - "required": [ - "type" ] }, + "time_created": { + "description": "when this blueprint was generated (for debugging)", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "cockroachdb_fingerprint", + "cockroachdb_setting_preserve_downgrade", + "comment", + "creator", + "external_dns_version", + "id", + "internal_dns_version", + "nexus_generation", + "oximeter_read_mode", + "oximeter_read_version", + "pending_mgs_updates", + "sleds", + "source", + "target_release_minimum_generation", + "time_created" + ] + }, + "BlueprintArtifactVersion": { + "description": "The version of an artifact in a blueprint.\n\nThis is used for debugging output.", + "oneOf": [ { + "description": "A specific version of the image is available.", "type": "object", "properties": { - "type": { + "artifact_version": { "type": "string", "enum": [ - "lzjb" + "available" ] + }, + "version": { + "$ref": "#/components/schemas/ArtifactVersion" } }, "required": [ - "type" + "artifact_version", + "version" ] }, { + "description": "The version could not be determined. 
This is non-fatal.", "type": "object", "properties": { - "type": { + "artifact_version": { "type": "string", "enum": [ - "zle" + "unknown" ] } }, "required": [ - "type" + "artifact_version" ] } ] }, - "CrucibleDatasetCreateRequest": { + "BlueprintDatasetConfig": { + "description": "Information about a dataset as recorded in a blueprint", "type": "object", "properties": { "address": { + "nullable": true, "type": "string" }, - "dataset_id": { + "compression": { + "$ref": "#/components/schemas/CompressionAlgorithm" + }, + "disposition": { + "$ref": "#/components/schemas/BlueprintDatasetDisposition" + }, + "id": { "$ref": "#/components/schemas/TypedUuidForDatasetKind" }, - "zpool_id": { - "$ref": "#/components/schemas/TypedUuidForZpoolKind" + "kind": { + "$ref": "#/components/schemas/DatasetKind" + }, + "pool": { + "$ref": "#/components/schemas/ZpoolName" + }, + "quota": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "reservation": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] } }, "required": [ - "address", - "dataset_id", - "zpool_id" + "compression", + "disposition", + "id", + "kind", + "pool" + ] + }, + "BlueprintDatasetDisposition": { + "description": "The desired state of an Omicron-managed dataset in a blueprint.\n\nPart of [`BlueprintDatasetConfig`].", + "oneOf": [ + { + "description": "The dataset is in-service.", + "type": "string", + "enum": [ + "in_service" + ] + }, + { + "description": "The dataset is permanently gone.", + "type": "string", + "enum": [ + "expunged" + ] + } ] }, - "CurrentStatus": { - "description": "Describes the current status of a background task", + "BlueprintHostPhase2DesiredContents": { + "description": "Describes the desired contents of a host phase 2 slot (i.e., the boot partition on one of the internal M.2 drives).\n\nThis is the blueprint version of [`HostPhase2DesiredContents`].", "oneOf": [ { - "description": "The background task is not running\n\nTypically, the task would be waiting for its next activation, which would happen after a timeout or some other event that triggers activation", + "description": "Do not change the current contents.\n\nWe use this value when we've detected a sled has been mupdated (and we don't want to overwrite phase 2 images until we understand how to recover from that mupdate) and as the default value when reading a blueprint that was ledgered before this concept existed.", "type": "object", "properties": { - "current_status": { + "type": { "type": "string", "enum": [ - "idle" + "current_contents" ] } }, "required": [ - "current_status" + "type" ] }, { - "description": "The background task is currently running\n\nMore precisely, the task has been activated and has not yet finished this activation", + "description": "Set the phase 2 slot to the given artifact.\n\nThe artifact will come from an unpacked and distributed TUF repo.", "type": "object", "properties": { - "current_status": { + "hash": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "type": { "type": "string", "enum": [ - "running" + "artifact" ] }, - "details": { - "$ref": "#/components/schemas/CurrentStatusRunning" + "version": { + "$ref": "#/components/schemas/BlueprintArtifactVersion" } }, "required": [ - "current_status", - "details" + "hash", + "type", + "version" ] } ] }, - "CurrentStatusRunning": { + "BlueprintHostPhase2DesiredSlots": { + "description": "Describes the desired contents for both host phase 2 slots.\n\nThis is the blueprint version of 
[`HostPhase2DesiredSlots`].", "type": "object", "properties": { - "iteration": { - "description": "which iteration this was (counter)", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "reason": { - "description": "what kind of event triggered this activation", - "allOf": [ - { - "$ref": "#/components/schemas/ActivationReason" - } - ] + "slot_a": { + "$ref": "#/components/schemas/BlueprintHostPhase2DesiredContents" }, - "start_time": { - "description": "wall-clock time when the current activation started", - "type": "string", - "format": "date-time" + "slot_b": { + "$ref": "#/components/schemas/BlueprintHostPhase2DesiredContents" } }, "required": [ - "iteration", - "reason", - "start_time" + "slot_a", + "slot_b" ] }, - "DatasetKind": { - "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", - "type": "string" - }, - "DemoSaga": { - "description": "Identifies an instance of the demo saga", + "BlueprintPhysicalDiskConfig": { + "description": "Information about an Omicron physical disk as recorded in a bluerprint.", "type": "object", "properties": { - "demo_saga_id": { - "$ref": "#/components/schemas/TypedUuidForDemoSagaKind" + "disposition": { + "$ref": "#/components/schemas/BlueprintPhysicalDiskDisposition" }, - "saga_id": { - "type": "string", - "format": "uuid" + "id": { + "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" + }, + "identity": { + "$ref": "#/components/schemas/DiskIdentity" + }, + "pool_id": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" } }, "required": [ - "demo_saga_id", - "saga_id" + "disposition", + "id", + "identity", + "pool_id" ] }, - "DiscretionaryZonePlacement": { - "type": "object", - "properties": { - "kind": { - "type": "string" + "BlueprintPhysicalDiskDisposition": { + "description": "The desired state of an Omicron-managed physical disk in a blueprint.", + "oneOf": [ + { + "description": "The physical disk is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + } + }, + "required": [ + "kind" + ] }, - "source": { - "type": "string" + { + "description": "The physical disk is permanently gone.", + "type": "object", + "properties": { + "as_of_generation": { + "description": "Generation of the parent config in which this disk became expunged.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + }, + "ready_for_cleanup": { + "description": "True if Reconfiguration knows that this disk has been expunged.\n\nIn the current implementation, this means either:\n\na) the sled where the disk was residing has been expunged.\n\nb) the planner has observed an inventory collection where the disk expungement was seen by the sled agent on the sled where the disk was previously in service. 
This is indicated by the inventory reporting a disk generation at least as high as `as_of_generation`.", + "type": "boolean" + } + }, + "required": [ + "as_of_generation", + "kind", + "ready_for_cleanup" + ] } - }, - "required": [ - "kind", - "source" ] }, - "DiskIdentity": { - "description": "Uniquely identifies a disk.", + "BlueprintSledConfig": { + "description": "Information about the configuration of a sled as recorded in a blueprint.\n\nPart of [`Blueprint`].", "type": "object", "properties": { - "model": { - "type": "string" + "datasets": { + "$ref": "#/components/schemas/IdMapBlueprintDatasetConfig" }, - "serial": { - "type": "string" + "disks": { + "$ref": "#/components/schemas/IdMapBlueprintPhysicalDiskConfig" }, - "vendor": { - "type": "string" - } - }, - "required": [ - "model", - "serial", - "vendor" - ] - }, - "DiskRuntimeState": { - "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", - "type": "object", - "properties": { - "disk_state": { - "description": "runtime state of the Disk", + "host_phase_2": { + "$ref": "#/components/schemas/BlueprintHostPhase2DesiredSlots" + }, + "remove_mupdate_override": { + "nullable": true, "allOf": [ { - "$ref": "#/components/schemas/DiskState" + "$ref": "#/components/schemas/TypedUuidForMupdateOverrideKind" } ] }, - "gen": { - "description": "generation number for this state", + "sled_agent_generation": { + "description": "Generation number used when this type is converted into an `OmicronSledConfig` for use by sled-agent.\n\nThis field is explicitly named `sled_agent_generation` to indicate that it is only required to cover information that changes what Reconfigurator sends to sled agent. For example, changing the sled `state` from `Active` to `Decommissioned` would not require a bump to `sled_agent_generation`, because a `Decommissioned` sled will never be sent an `OmicronSledConfig`.", "allOf": [ { "$ref": "#/components/schemas/Generation" } ] }, - "time_updated": { - "description": "timestamp for this information", - "type": "string", - "format": "date-time" + "state": { + "$ref": "#/components/schemas/SledState" + }, + "zones": { + "$ref": "#/components/schemas/IdMapBlueprintZoneConfig" } }, "required": [ - "disk_state", - "gen", - "time_updated" + "datasets", + "disks", + "host_phase_2", + "sled_agent_generation", + "state", + "zones" ] }, - "DiskState": { - "description": "State of a Disk", + "BlueprintSource": { + "description": "Description of the source of a blueprint.", "oneOf": [ { - "description": "Disk is being initialized", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "creating" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is ready but detached from any Instance", + "description": "The initial blueprint created by the rack setup service.", "type": "object", "properties": { - "state": { + "source": { "type": "string", "enum": [ - "detached" + "rss" ] } }, "required": [ - "state" + "source" ] }, { - "description": "Disk is ready to receive blocks from an external source", + "description": "A blueprint created by the planner, and we still have the associated planning report.", "type": "object", "properties": { - "state": { - "type": "string", - "enum": [ - "import_ready" + "add": { + "$ref": "#/components/schemas/PlanningAddStepReport" + }, + "cockroachdb_settings": { + "$ref": "#/components/schemas/PlanningCockroachdbSettingsStepReport" + }, + "decommission": { + "$ref": 
"#/components/schemas/PlanningDecommissionStepReport" + }, + "expunge": { + "$ref": "#/components/schemas/PlanningExpungeStepReport" + }, + "mgs_updates": { + "$ref": "#/components/schemas/PlanningMgsUpdatesStepReport" + }, + "nexus_generation_bump": { + "$ref": "#/components/schemas/PlanningNexusGenerationBumpReport" + }, + "noop_image_source": { + "$ref": "#/components/schemas/PlanningNoopImageSourceStepReport" + }, + "planner_config": { + "description": "The configuration in effect for this planning run.", + "allOf": [ + { + "$ref": "#/components/schemas/PlannerConfig" + } ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is importing blocks from a URL", - "type": "object", - "properties": { - "state": { + }, + "source": { "type": "string", "enum": [ - "importing_from_url" + "planner" ] + }, + "zone_updates": { + "$ref": "#/components/schemas/PlanningZoneUpdatesStepReport" } }, "required": [ - "state" + "add", + "cockroachdb_settings", + "decommission", + "expunge", + "mgs_updates", + "nexus_generation_bump", + "noop_image_source", + "planner_config", + "source", + "zone_updates" ] }, { - "description": "Disk is importing blocks from bulk writes", + "description": "A blueprint created by the planner but loaded from the database, so we no longer have the associated planning report.", "type": "object", "properties": { - "state": { + "source": { "type": "string", "enum": [ - "importing_from_bulk_writes" + "planner_loaded_from_database" ] } }, "required": [ - "state" + "source" ] }, { - "description": "Disk is being finalized to state Detached", + "description": "This blueprint was created by one of `reconfigurator-cli`'s blueprint editing subcommands.", "type": "object", "properties": { - "state": { + "source": { "type": "string", "enum": [ - "finalizing" + "reconfigurator_cli_edit" ] } }, "required": [ - "state" + "source" ] }, { - "description": "Disk is undergoing maintenance", + "description": "This blueprint was constructed by hand by an automated test.", "type": "object", "properties": { - "state": { + "source": { "type": "string", "enum": [ - "maintenance" + "test" ] } }, "required": [ - "state" + "source" + ] + } + ] + }, + "BlueprintZoneConfig": { + "description": "Describes one Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintSledConfig`].", + "type": "object", + "properties": { + "disposition": { + "description": "The disposition (desired state) of this zone recorded in the blueprint.", + "allOf": [ + { + "$ref": "#/components/schemas/BlueprintZoneDisposition" + } + ] + }, + "filesystem_pool": { + "description": "zpool used for the zone's (transient) root filesystem", + "allOf": [ + { + "$ref": "#/components/schemas/ZpoolName" + } ] }, + "id": { + "$ref": "#/components/schemas/TypedUuidForOmicronZoneKind" + }, + "image_source": { + "$ref": "#/components/schemas/BlueprintZoneImageSource" + }, + "zone_type": { + "$ref": "#/components/schemas/BlueprintZoneType" + } + }, + "required": [ + "disposition", + "filesystem_pool", + "id", + "image_source", + "zone_type" + ] + }, + "BlueprintZoneDisposition": { + "description": "The desired state of an Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZoneConfig`].", + "oneOf": [ { - "description": "Disk is being attached to the given Instance", + "description": "The zone is in-service.", "type": "object", "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { + "kind": { "type": "string", "enum": [ - "attaching" + "in_service" ] } }, "required": [ - "instance", - 
"state" + "kind" ] }, { - "description": "Disk is attached to the given Instance", + "description": "The zone is permanently gone.", "type": "object", "properties": { - "instance": { - "type": "string", - "format": "uuid" + "as_of_generation": { + "description": "Generation of the parent config in which this zone became expunged.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] }, - "state": { + "kind": { "type": "string", "enum": [ - "attached" + "expunged" ] + }, + "ready_for_cleanup": { + "description": "True if Reconfiguration knows that this zone has been shut down and will not be restarted.\n\nIn the current implementation, this means the planner has observed an inventory collection where the sled on which this zone was running (a) is no longer running the zone and (b) has a config generation at least as high as `as_of_generation`, indicating it will not try to start the zone on a cold boot based on an older config.", + "type": "boolean" } }, "required": [ - "instance", - "state" + "as_of_generation", + "kind", + "ready_for_cleanup" ] - }, + } + ] + }, + "BlueprintZoneImageSource": { + "description": "Where the zone's image source is located.\n\nThis is the blueprint version of [`OmicronZoneImageSource`].", + "oneOf": [ { - "description": "Disk is being detached from the given Instance", + "description": "This zone's image source is whatever happens to be on the sled's \"install\" dataset.\n\nThis is whatever was put in place at the factory or by the latest MUPdate. The image used here can vary by sled and even over time (if the sled gets MUPdated again).\n\nHistorically, this was the only source for zone images. In an system with automated control-plane-driven update we expect to only use this variant in emergencies where the system had to be recovered via MUPdate.", "type": "object", "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { + "type": { "type": "string", "enum": [ - "detaching" + "install_dataset" ] } }, "required": [ - "instance", - "state" + "type" ] }, { - "description": "Disk has been destroyed", + "description": "This zone's image source is the artifact matching this hash from the TUF artifact store (aka \"TUF repo depot\").\n\nThis originates from TUF repos uploaded to Nexus which are then replicated out to all sleds.", "type": "object", "properties": { - "state": { + "hash": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "type": { "type": "string", "enum": [ - "destroyed" + "artifact" ] + }, + "version": { + "$ref": "#/components/schemas/BlueprintArtifactVersion" } }, "required": [ - "state" + "hash", + "type", + "version" ] - }, + } + ] + }, + "BlueprintZoneType": { + "oneOf": [ { - "description": "Disk is unavailable", "type": "object", "properties": { - "state": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "external_ip": { + "$ref": "#/components/schemas/OmicronZoneExternalSnatIp" + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { "type": "string", "enum": [ - "faulted" + "boundary_ntp" ] } }, "required": [ - "state" + "address", + "dns_servers", + "external_ip", + "nic", + "ntp_servers", + "type" ] - } - ] - }, - 
"DnsConfigParams": { - "type": "object", - "properties": { - "generation": { - "$ref": "#/components/schemas/Generation" - }, - "serial": { - "description": "See [`DnsConfig`]'s `serial` field for how this is different from `generation`", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "time_created": { - "type": "string", - "format": "date-time" - }, - "zones": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DnsConfigZone" - } - } - }, - "required": [ - "generation", - "serial", - "time_created", - "zones" - ] - }, - "DnsConfigZone": { - "description": "Configuration for a specific DNS zone, as opposed to illumos zones in which the services described by these records run.\n\nThe name `@` is special: it describes records that should be provided for queries about `zone_name`. This is used in favor of the empty string as `@` is the name used for this purpose in zone files for most DNS configurations. It also avoids potentially-confusing debug output from naively printing out records and their names - if you've seen an `@` record and tools are unclear about what that means, hopefully you've arrived here!", - "type": "object", - "properties": { - "records": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DnsRecord" - } - } }, - "zone_name": { - "type": "string" - } - }, - "required": [ - "records", - "zone_name" - ] - }, - "DnsRecord": { - "oneOf": [ { + "description": "Used in single-node clickhouse setups", "type": "object", "properties": { - "data": { - "type": "string", - "format": "ipv4" + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "A" + "clickhouse" ] } }, "required": [ - "data", + "address", + "dataset", "type" ] }, { "type": "object", "properties": { - "data": { - "type": "string", - "format": "ipv6" + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "AAAA" + "clickhouse_keeper" ] } }, "required": [ - "data", + "address", + "dataset", "type" ] }, { + "description": "Used in replicated clickhouse setups", "type": "object", "properties": { - "data": { - "$ref": "#/components/schemas/Srv" + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "SRV" + "clickhouse_server" ] } }, "required": [ - "data", + "address", + "dataset", "type" ] }, { "type": "object", "properties": { - "data": { + "address": { "type": "string" }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, "type": { "type": "string", "enum": [ - "NS" + "cockroach_db" ] } }, "required": [ - "data", + "address", + "dataset", "type" ] - } - ] - }, - "DownstairsClientStopRequest": { - "type": "object", - "properties": { - "reason": { - "$ref": "#/components/schemas/DownstairsClientStopRequestReason" - }, - "time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "reason", - "time" - ] - }, - "DownstairsClientStopRequestReason": { - "type": "string", - "enum": [ - "replacing", - "disabled", - "failed_reconcile", - "i_o_error", - "bad_negotiation_order", - "incompatible", - "failed_live_repair", - "too_many_outstanding_jobs", - "deactivated" - ] - }, - "DownstairsClientStopped": { - "type": "object", - "properties": { - "reason": { - "$ref": "#/components/schemas/DownstairsClientStoppedReason" - }, 
- "time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "reason", - "time" - ] - }, - "DownstairsClientStoppedReason": { - "type": "string", - "enum": [ - "connection_timeout", - "connection_failed", - "timeout", - "write_failed", - "read_failed", - "requested_stop", - "finished", - "queue_closed", - "receive_task_cancelled" - ] - }, - "DownstairsUnderRepair": { - "type": "object", - "properties": { - "region_uuid": { - "$ref": "#/components/schemas/TypedUuidForDownstairsRegionKind" - }, - "target_addr": { - "type": "string" - } - }, - "required": [ - "region_uuid", - "target_addr" - ] - }, - "Duration": { - "type": "object", - "properties": { - "nanos": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "secs": { - "type": "integer", - "format": "uint64", - "minimum": 0 - } - }, - "required": [ - "nanos", - "secs" - ] - }, - "Error": { - "description": "Error information from a response.", - "type": "object", - "properties": { - "error_code": { - "type": "string" - }, - "message": { - "type": "string" - }, - "request_id": { - "type": "string" - } - }, - "required": [ - "message", - "request_id" - ] - }, - "ExpectedActiveRotSlot": { - "description": "Describes the expected active RoT slot, and the version we expect to find for it", - "type": "object", - "properties": { - "slot": { - "$ref": "#/components/schemas/RotSlot" }, - "version": { - "$ref": "#/components/schemas/ArtifactVersion" - } - }, - "required": [ - "slot", - "version" - ] - }, - "ExpectedVersion": { - "description": "Describes the version that we expect to find in some firmware slot", - "oneOf": [ { - "description": "We expect to find _no_ valid caboose in this slot", "type": "object", "properties": { - "kind": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { "type": "string", "enum": [ - "no_valid_version" + "crucible" ] } }, "required": [ - "kind" + "address", + "dataset", + "type" ] }, { - "description": "We expect to find the specified version in this slot", "type": "object", "properties": { - "kind": { + "address": { + "type": "string" + }, + "type": { "type": "string", "enum": [ - "version" + "crucible_pantry" ] - }, - "version": { - "$ref": "#/components/schemas/ArtifactVersion" } }, "required": [ - "kind", - "version" + "address", + "type" ] - } - ] - }, - "ExternalPortDiscovery": { - "oneOf": [ + }, { "type": "object", "properties": { - "auto": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "ipv6" - } + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "allOf": [ + { + "$ref": "#/components/schemas/OmicronZoneExternalFloatingAddr" + } + ] + }, + "http_address": { + "description": "The address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "external_dns" + ] } }, "required": [ - "auto" - ], - "additionalProperties": false + "dataset", + "dns_address", + "http_address", + "nic", + "type" + ] }, { "type": "object", "properties": { - "static": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Name" - } - } + "dataset": { + "$ref": 
"#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "http_address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] } }, "required": [ - "static" - ], - "additionalProperties": false - } - ] - }, - "Generation": { - "description": "Generation numbers stored in the database, used for optimistic concurrency control", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "GzipLevel": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "HeldDbClaimInfo": { - "description": "Describes an outstanding database claim (for debugging why quiesce is stuck)", - "type": "object", - "properties": { - "debug": { - "type": "string" - }, - "held_since": { - "type": "string", - "format": "date-time" + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ] }, - "id": { - "type": "integer", - "format": "uint64", - "minimum": 0 - } - }, - "required": [ - "debug", - "held_since", - "id" - ] - }, - "HostPhase1Status": { - "oneOf": [ { - "description": "This device has no host phase 1 status because it is not a sled (e.g., it's a PSC or switch).", "type": "object", "properties": { - "kind": { + "address": { + "type": "string" + }, + "type": { "type": "string", "enum": [ - "not_a_sled" + "internal_ntp" ] } }, "required": [ - "kind" + "address", + "type" ] }, { "type": "object", "properties": { - "active_slot": { - "nullable": true, + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "external_ip": { + "description": "The address at which the external nexus server is reachable.", "allOf": [ { - "$ref": "#/components/schemas/M2Slot" + "$ref": "#/components/schemas/OmicronZoneExternalFloatingIp" } ] }, - "kind": { - "type": "string", - "enum": [ - "sled" - ] + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" }, - "sled_id": { - "nullable": true, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" + }, + "lockstep_port": { + "description": "The port at which the lockstep server is reachable. This shares the same IP address with `internal_address`.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "nexus_generation": { + "description": "Generation number for this Nexus zone. This is used to coordinate handoff between old and new Nexus instances during updates. 
See RFD 588.", "allOf": [ { - "$ref": "#/components/schemas/TypedUuidForSledKind" + "$ref": "#/components/schemas/Generation" } ] }, - "slot_a_version": { - "$ref": "#/components/schemas/TufRepoVersion" - }, - "slot_b_version": { - "$ref": "#/components/schemas/TufRepoVersion" - } - }, - "required": [ - "kind", - "slot_a_version", - "slot_b_version" - ] - } - ] - }, - "HostPhase2Status": { - "type": "object", - "properties": { - "boot_disk": { - "x-rust-type": { - "crate": "std", - "parameters": [ - { - "$ref": "#/components/schemas/M2Slot" - }, - { - "type": "string" - } - ], - "path": "::std::result::Result", - "version": "*" - }, - "oneOf": [ - { - "type": "object", - "properties": { - "ok": { - "$ref": "#/components/schemas/M2Slot" + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" } - }, - "required": [ - "ok" ] }, - { - "type": "object", - "properties": { - "err": { - "type": "string" - } - }, - "required": [ - "err" - ] - } - ] - }, - "slot_a_version": { - "$ref": "#/components/schemas/TufRepoVersion" - }, - "slot_b_version": { - "$ref": "#/components/schemas/TufRepoVersion" - } - }, - "required": [ - "boot_disk", - "slot_a_version", - "slot_b_version" - ] - }, - "IdMapBlueprintDatasetConfig": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BlueprintDatasetConfig" - } - }, - "IdMapBlueprintPhysicalDiskConfig": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BlueprintPhysicalDiskConfig" - } - }, - "IdMapBlueprintZoneConfig": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BlueprintZoneConfig" - } - }, - "ImportExportPolicy": { - "description": "Define policy relating to the import and export of prefixes from a BGP peer.", - "oneOf": [ - { - "description": "Do not perform any filtering.", - "type": "object", - "properties": { "type": { "type": "string", "enum": [ - "no_filtering" + "nexus" ] } }, "required": [ + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "lockstep_port", + "nexus_generation", + "nic", "type" ] }, { "type": "object", "properties": { + "address": { + "type": "string" + }, "type": { "type": "string", "enum": [ - "allow" + "oximeter" ] - }, - "value": { - "type": "array", - "items": { - "$ref": "#/components/schemas/IpNet" - } } }, "required": [ - "type", - "value" + "address", + "type" ] } ] }, - "InProgressUpdateStatus": { - "description": "externally-exposed status for each in-progress update", - "type": "object", - "properties": { - "baseboard_id": { - "$ref": "#/components/schemas/BaseboardId" - }, - "nattempts_done": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "status": { - "$ref": "#/components/schemas/UpdateAttemptStatus" - }, - "time_started": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "baseboard_id", - "nattempts_done", - "status", - "time_started" - ] + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 }, - "Instance": { - "description": "View of an Instance", + "Certificate": { "type": "object", "properties": { - "auto_restart_cooldown_expiration": { - "nullable": true, - "description": "The time at which the auto-restart cooldown period for this instance completes, permitting it to be automatically restarted again. 
If the instance enters the `Failed` state, it will not be restarted until after this time.\n\nIf this is not present, then either the instance has never been automatically restarted, or the cooldown period has already expired, allowing the instance to be restarted immediately if it fails.", - "type": "string", - "format": "date-time" - }, - "auto_restart_enabled": { - "description": "`true` if this instance's auto-restart policy will permit the control plane to automatically restart it if it enters the `Failed` state.", - "type": "boolean" - }, - "auto_restart_policy": { - "nullable": true, - "description": "The auto-restart policy configured for this instance, or `null` if no explicit policy has been configured.\n\nThis policy determines whether the instance should be automatically restarted by the control plane on failure. If this is `null`, the control plane will use the default policy when determining whether or not to automatically restart this instance, which may or may not allow it to be restarted. The value of the `auto_restart_enabled` field indicates whether the instance will be auto-restarted, based on its current policy or the default if it has no configured policy.", - "allOf": [ - { - "$ref": "#/components/schemas/InstanceAutoRestartPolicy" - } - ] - }, - "boot_disk_id": { - "nullable": true, - "description": "the ID of the disk used to boot this Instance, if a specific one is assigned.", - "type": "string", - "format": "uuid" - }, - "cpu_platform": { - "nullable": true, - "description": "The CPU platform for this instance. If this is `null`, the instance requires no particular CPU platform.", - "allOf": [ - { - "$ref": "#/components/schemas/InstanceCpuPlatform" - } - ] - }, - "description": { - "description": "human-readable free-form text about a resource", + "cert": { "type": "string" }, - "hostname": { - "description": "RFC1035-compliant hostname for the Instance.", + "key": { + "type": "string" + } + }, + "required": [ + "cert", + "key" + ] + }, + "ClickhouseClusterConfig": { + "description": "Global configuration for all clickhouse servers (replicas) and keepers", + "type": "object", + "properties": { + "cluster_name": { + "description": "An arbitrary name for the Clickhouse cluster shared by all nodes", "type": "string" }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" + "cluster_secret": { + "description": "An arbitrary string shared by all nodes used at runtime to determine whether nodes are part of the same cluster.", + "type": "string" }, - "memory": { - "description": "memory allocated for this Instance", + "generation": { + "description": "The last update to the clickhouse cluster configuration\n\nThis is used by `clickhouse-admin` in the clickhouse server and keeper zones to discard old configurations.", "allOf": [ { - "$ref": "#/components/schemas/ByteCount" + "$ref": "#/components/schemas/Generation" } ] }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "highest_seen_keeper_leader_committed_log_index": { + "description": "This is used as a marker to tell if the raft configuration in a new inventory collection is newer than the last collection. This serves as a surrogate for the log index of the last committed configuration, which clickhouse keeper doesn't expose.\n\nThis is necesssary because during inventory collection we poll multiple keeper nodes, and each returns their local knowledge of the configuration. 
But we may reach different nodes in different attempts, and some nodes in a following attempt may reflect stale configuration. Due to timing, we can always query old information. That is just normal polling. However, we never want to use old configuration if we have already seen and acted on newer configuration.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "keepers": { + "description": "The desired state of the clickhouse keeper cluster\n\nWe decouple deployment of zones that should contain clickhouse keeper processes from actually starting or stopping those processes, adding or removing them to/from the keeper cluster, and reconfiguring other keeper and clickhouse server nodes to reflect the new configuration.\n\nAs part of this decoupling, we keep track of the intended zone deployment in the blueprint, but that is not enough to track the desired state of the keeper cluster. We are only allowed to add or remove one keeper node at a time, and therefore we must track the desired state of the keeper cluster which may change multiple times until the keepers in the cluster match the deployed zones. An example may help:\n\n1. We start with 3 keeper nodes in 3 deployed keeper zones and need to add two to reach our desired policy of 5 keepers 2. The planner adds 2 new keeper zones to the blueprint 3. The planner will also add **one** new keeper to the `keepers` field below that matches one of the deployed zones. 4. The executor will start the new keeper process that was added to the `keepers` field, attempt to add it to the keeper cluster by pushing configuration updates to the other keepers, and then updating the clickhouse server configurations to know about the new keeper. 5. If the keeper is successfully added, as reflected in inventory, then steps 3 and 4 above will be repeated for the next keeper process. 6. If the keeper is not successfully added by the executor it will continue to retry indefinitely. 7. If the zone is expunged while the planner has it as part of its desired state in `keepers`, and the executor is trying to add it, the keeper will be removed from `keepers` in the next blueprint. If it has been added to the actual cluster by an executor in the meantime it will be removed on the next iteration of an executor.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/KeeperId" + } + }, + "max_used_keeper_id": { + "description": "Clickhouse Keeper IDs must be unique and are handed out monotonically. Keep track of the last used one.", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/KeeperId" } ] }, - "ncpus": { - "description": "number of CPUs allocated for this Instance", + "max_used_server_id": { + "description": "Clickhouse Server IDs must be unique and are handed out monotonically. 
Keep track of the last used one.", "allOf": [ { - "$ref": "#/components/schemas/InstanceCpuCount" + "$ref": "#/components/schemas/ServerId" } ] }, - "project_id": { - "description": "id for the project containing this Instance", - "type": "string", - "format": "uuid" - }, - "run_state": { - "$ref": "#/components/schemas/InstanceState" - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" - }, - "time_last_auto_restarted": { - "nullable": true, - "description": "The timestamp of the most recent time this instance was automatically restarted by the control plane.\n\nIf this is not present, then this instance has not been automatically restarted.", - "type": "string", - "format": "date-time" - }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" - }, - "time_run_state_updated": { - "type": "string", - "format": "date-time" + "servers": { + "description": "The desired state of clickhouse server processes on the rack\n\nClickhouse servers do not have the same limitations as keepers and can be deployed all at once.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ServerId" + } } }, "required": [ - "auto_restart_enabled", - "description", - "hostname", - "id", - "memory", - "name", - "ncpus", - "project_id", - "run_state", - "time_created", - "time_modified", - "time_run_state_updated" + "cluster_name", + "cluster_secret", + "generation", + "highest_seen_keeper_leader_committed_log_index", + "keepers", + "max_used_keeper_id", + "max_used_server_id", + "servers" + ] + }, + "CockroachDbClusterVersion": { + "description": "CockroachDB cluster versions we are aware of.\n\nCockroachDB can be upgraded from one major version to the next, e.g. v22.1 -> v22.2. Each major version introduces changes in how it stores data on disk to support new features, and each major version has support for reading the previous version's data so that it can perform an upgrade. The version of the data format is called the \"cluster version\", which is distinct from but related to the software version that's being run.\n\nWhile software version v22.2 is using cluster version v22.1, it's possible to downgrade back to v22.1. Once the cluster version is upgraded, there's no going back.\n\nTo give us some time to evaluate new versions of the software while retaining a downgrade path, we currently deploy new versions of CockroachDB across two releases of the Oxide software, in a \"tick-tock\" model:\n\n- In \"tick\" releases, we upgrade the version of the CockroachDB software to a new major version, and update `CockroachDbClusterVersion::NEWLY_INITIALIZED`. On upgraded racks, the new version is running with the previous cluster version; on newly-initialized racks, the new version is running with the new cluser version. - In \"tock\" releases, we change `CockroachDbClusterVersion::POLICY` to the major version we upgraded to in the last \"tick\" release. This results in a new blueprint that upgrades the cluster version, destroying the downgrade path but allowing us to eventually upgrade to the next release.\n\nThese presently describe major versions of CockroachDB. 
The order of these must be maintained in the correct order (the first variant must be the earliest version).", + "type": "string", + "enum": [ + "V22_1" ] }, - "InstanceAutoRestartPolicy": { - "description": "A policy determining when an instance should be automatically restarted by the control plane.", + "CockroachDbPreserveDowngrade": { + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to.", "oneOf": [ { - "description": "The instance should not be automatically restarted by the control plane if it fails.", - "type": "string", - "enum": [ - "never" + "description": "Do not modify the setting.", + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "do_not_modify" + ] + } + }, + "required": [ + "action" ] }, { - "description": "If this instance is running and unexpectedly fails (e.g. due to a host software crash or unexpected host reboot), the control plane will make a best-effort attempt to restart it. The control plane may choose not to restart the instance to preserve the overall availability of the system.", - "type": "string", - "enum": [ - "best_effort" - ] - } - ] - }, - "InstanceCpuCount": { - "description": "The number of CPUs in an Instance", - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "InstanceCpuPlatform": { - "description": "A required CPU platform for an instance.\n\nWhen an instance specifies a required CPU platform:\n\n- The system may expose (to the VM) new CPU features that are only present on that platform (or on newer platforms of the same lineage that also support those features). - The instance must run on hosts that have CPUs that support all the features of the supplied platform.\n\nThat is, the instance is restricted to hosts that have the CPUs which support all features of the required platform, but in exchange the CPU features exposed by the platform are available for the guest to use. Note that this may prevent an instance from starting (if the hosts that could run it are full but there is capacity on other incompatible hosts).\n\nIf an instance does not specify a required CPU platform, then when it starts, the control plane selects a host for the instance and then supplies the guest with the \"minimum\" CPU platform supported by that host. This maximizes the number of hosts that can run the VM if it later needs to migrate to another host.\n\nIn all cases, the CPU features presented by a given CPU platform are a subset of what the corresponding hardware may actually support; features which cannot be used from a virtual environment or do not have full hypervisor support may be masked off. 
See RFD 314 for specific CPU features in a CPU platform.", - "oneOf": [ - { - "description": "An AMD Milan-like CPU platform.", - "type": "string", - "enum": [ - "amd_milan" + "description": "Ensure the setting is set to an empty string.", + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "allow_upgrade" + ] + } + }, + "required": [ + "action" ] }, { - "description": "An AMD Turin-like CPU platform.", - "type": "string", - "enum": [ - "amd_turin" + "description": "Ensure the setting is set to a given cluster version.", + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "set" + ] + }, + "data": { + "$ref": "#/components/schemas/CockroachDbClusterVersion" + } + }, + "required": [ + "action", + "data" ] } ] }, - "InstanceMigrateRequest": { - "description": "Parameters used when migrating an instance.", - "type": "object", - "properties": { - "dst_sled_id": { - "description": "The ID of the sled to which to migrate the target instance.", - "type": "string", - "format": "uuid" - } - }, - "required": [ - "dst_sled_id" - ] - }, - "InstanceState": { - "description": "Running state of an Instance (primarily: booted or stopped)\n\nThis typically reflects whether it's starting, running, stopping, or stopped, but also includes states related to the Instance's lifecycle", + "CockroachdbUnsafeToShutdown": { "oneOf": [ { - "description": "The instance is being created.", - "type": "string", - "enum": [ - "creating" - ] - }, - { - "description": "The instance is currently starting up.", - "type": "string", - "enum": [ - "starting" - ] - }, - { - "description": "The instance is currently running.", - "type": "string", - "enum": [ - "running" - ] - }, - { - "description": "The instance has been requested to stop and a transition to \"Stopped\" is imminent.", - "type": "string", - "enum": [ - "stopping" - ] - }, - { - "description": "The instance is currently stopped.", - "type": "string", - "enum": [ - "stopped" - ] - }, - { - "description": "The instance is in the process of rebooting - it will remain in the \"rebooting\" state until the VM is starting once more.", - "type": "string", - "enum": [ - "rebooting" - ] - }, - { - "description": "The instance is in the process of migrating - it will remain in the \"migrating\" state until the migration process is complete and the destination propolis is ready to continue execution.", - "type": "string", - "enum": [ - "migrating" - ] - }, - { - "description": "The instance is attempting to recover from a failure.", - "type": "string", - "enum": [ - "repairing" + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_live_nodes_stat" + ] + } + }, + "required": [ + "type" ] }, { - "description": "The instance has encountered a failure.", - "type": "string", - "enum": [ - "failed" + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_underreplicated_stat" + ] + } + }, + "required": [ + "type" ] }, { - "description": "The instance has been deleted.", - "type": "string", - "enum": [ - "destroyed" - ] - } - ] - }, - "IpNet": { - "x-rust-type": { - "crate": "oxnet", - "path": "oxnet::IpNet", - "version": "0.1.0" - }, - "oneOf": [ + "type": "object", + "properties": { + "live_nodes": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "not_enough_live_nodes" + ] + } + }, + "required": [ + "live_nodes", + "type" + ] + }, { - "title": "v4", - "allOf": [ - { - "$ref": 
"#/components/schemas/Ipv4Net" + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "not_enough_nodes" + ] } + }, + "required": [ + "type" ] }, { - "title": "v6", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Net" + "type": "object", + "properties": { + "n": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "underreplicated_ranges" + ] } + }, + "required": [ + "n", + "type" ] } ] }, - "IpRange": { + "CompressionAlgorithm": { "oneOf": [ { - "title": "v4", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Range" + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] } + }, + "required": [ + "type" ] }, { - "title": "v6", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Range" + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] } + }, + "required": [ + "type" ] - } - ] - }, - "Ipv4Net": { - "example": "192.168.1.0/24", - "title": "An IPv4 subnet", - "description": "An IPv4 subnet, including prefix and prefix length", - "x-rust-type": { - "crate": "oxnet", - "path": "oxnet::Ipv4Net", - "version": "0.1.0" - }, - "type": "string", - "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" - }, - "Ipv4Range": { - "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", - "type": "object", - "properties": { - "first": { - "type": "string", - "format": "ipv4" - }, - "last": { - "type": "string", - "format": "ipv4" - } - }, - "required": [ - "first", - "last" - ] - }, - "Ipv6Net": { - "example": "fd12:3456::/64", - "title": "An IPv6 subnet", - "description": "An IPv6 subnet, including prefix and subnet mask", - "x-rust-type": { - "crate": "oxnet", - "path": "oxnet::Ipv6Net", - "version": "0.1.0" - }, - "type": "string", - "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" - }, - "Ipv6Range": { - "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", - "type": "object", - "properties": { - "first": { - "type": "string", - "format": "ipv6" }, - "last": { - "type": "string", - "format": "ipv6" - } - }, - "required": [ - "first", - "last" - ] - }, - "KeeperId": { - "description": "A unique ID for a ClickHouse Keeper", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "LastResult": { - "oneOf": [ { - "description": "The task has never completed an activation", "type": "object", "properties": { - "last_result": { + "type": { "type": "string", "enum": [ - "never_completed" + "gzip" ] } }, "required": [ - "last_result" + "type" ] }, { - "description": "The task has completed at 
least one activation", "type": "object", "properties": { - "details": { - "$ref": "#/components/schemas/LastResultCompleted" + "level": { + "$ref": "#/components/schemas/GzipLevel" }, - "last_result": { + "type": { "type": "string", "enum": [ - "completed" + "gzip_n" ] } }, "required": [ - "details", - "last_result" + "level", + "type" ] - } - ] - }, - "LastResultCompleted": { - "type": "object", - "properties": { - "details": { - "description": "arbitrary datum emitted by the background task" }, - "elapsed": { - "description": "total time elapsed during the activation", - "allOf": [ - { - "$ref": "#/components/schemas/Duration" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] } + }, + "required": [ + "type" ] }, - "iteration": { - "description": "which iteration this was (counter)", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "reason": { - "description": "what kind of event triggered this activation", - "allOf": [ - { - "$ref": "#/components/schemas/ActivationReason" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] } + }, + "required": [ + "type" ] }, - "start_time": { - "description": "wall-clock time when the activation started", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "details", - "elapsed", - "iteration", - "reason", - "start_time" - ] - }, - "LldpAdminStatus": { - "description": "To what extent should this port participate in LLDP", - "type": "string", - "enum": [ - "enabled", - "disabled", - "rx_only", - "tx_only" - ] - }, - "LldpPortConfig": { - "description": "Per-port LLDP configuration settings. Only the \"status\" setting is mandatory. All other fields have natural defaults or may be inherited from the switch.", - "type": "object", - "properties": { - "chassis_id": { - "nullable": true, - "description": "Chassis ID to advertise. If this is set, it will be advertised as a LocallyAssigned ID type. If this is not set, it will be inherited from the switch-level settings.", - "type": "string" - }, - "management_addrs": { - "nullable": true, - "description": "Management IP addresses to advertise. If this is not set, it will be inherited from the switch-level settings.", - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "port_description": { - "nullable": true, - "description": "Port description to advertise. If this is not set, no description will be advertised.", - "type": "string" - }, - "port_id": { - "nullable": true, - "description": "Port ID to advertise. If this is set, it will be advertised as a LocallyAssigned ID type. If this is not set, it will be set to the port name. e.g., qsfp0/0.", - "type": "string" - }, - "status": { - "description": "To what extent should this port participate in LLDP", - "allOf": [ - { - "$ref": "#/components/schemas/LldpAdminStatus" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] } - ] - }, - "system_description": { - "nullable": true, - "description": "System description to advertise. If this is not set, it will be inherited from the switch-level settings.", - "type": "string" - }, - "system_name": { - "nullable": true, - "description": "System name to advertise. 
If this is not set, it will be inherited from the switch-level settings.", - "type": "string" + }, + "required": [ + "type" + ] } - }, - "required": [ - "status" - ] - }, - "M2Slot": { - "description": "Describes an M.2 slot, often in the context of writing a system image to it.", - "type": "string", - "enum": [ - "A", - "B" ] }, - "MacAddr": { - "example": "ff:ff:ff:ff:ff:ff", - "title": "A MAC address", - "description": "A Media Access Control address, in EUI-48 format", - "type": "string", - "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", - "minLength": 5, - "maxLength": 17 - }, - "MgsDrivenUpdateStatus": { + "CrucibleDatasetCreateRequest": { "type": "object", "properties": { - "baseboard_description": { + "address": { "type": "string" }, - "host_os_phase_1": { - "$ref": "#/components/schemas/HostPhase1Status" - }, - "rot": { - "$ref": "#/components/schemas/RotStatus" - }, - "rot_bootloader": { - "$ref": "#/components/schemas/RotBootloaderStatus" + "dataset_id": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" }, - "sp": { - "$ref": "#/components/schemas/SpStatus" + "zpool_id": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" } }, "required": [ - "baseboard_description", - "host_os_phase_1", - "rot", - "rot_bootloader", - "sp" + "address", + "dataset_id", + "zpool_id" ] }, - "MgsUpdateDriverStatus": { - "description": "Status of ongoing update attempts, recently completed attempts, and update requests that are waiting for retry.", + "DatasetKind": { + "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" + }, + "DiscretionaryZonePlacement": { "type": "object", "properties": { - "in_progress": { - "title": "IdOrdMap", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/InProgressUpdateStatus" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, - "type": "array", - "items": { - "$ref": "#/components/schemas/InProgressUpdateStatus" - }, - "uniqueItems": true - }, - "recent": { - "type": "array", - "items": { - "$ref": "#/components/schemas/CompletedAttempt" - } + "kind": { + "type": "string" }, - "waiting": { - "title": "IdOrdMap", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/WaitingStatus" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, - "type": "array", - "items": { - "$ref": "#/components/schemas/WaitingStatus" - }, - "uniqueItems": true + "source": { + "type": "string" } }, "required": [ - "in_progress", - "recent", - "waiting" + "kind", + "source" ] }, - "MigrationRuntimeState": { - "description": "An update from a sled regarding the state of a migration, indicating the role of the VMM whose migration state was updated.", + "DiskIdentity": { + "description": "Uniquely identifies a disk.", "type": "object", "properties": { - "gen": { - "$ref": "#/components/schemas/Generation" - }, - "migration_id": { - "type": "string", - "format": "uuid" + "model": { + "type": "string" }, - "state": { - "$ref": "#/components/schemas/MigrationState" + "serial": { + "type": "string" }, - "time_updated": { - "description": "Timestamp for the migration state update.", - "type": "string", - "format": "date-time" + "vendor": { + "type": "string" } }, "required": [ - "gen", - "migration_id", - "state", - "time_updated" - ] - }, - "MigrationState": { - "description": "The state of an instance's live migration.", - "oneOf": [ - { - "description": "The migration has not started for this VMM.", - "type": 
"string", - "enum": [ - "pending" - ] - }, - { - "description": "The migration is in progress.", - "type": "string", - "enum": [ - "in_progress" - ] - }, - { - "description": "The migration has failed.", - "type": "string", - "enum": [ - "failed" - ] - }, - { - "description": "The migration has completed.", - "type": "string", - "enum": [ - "completed" - ] - } + "model", + "serial", + "vendor" ] }, - "Name": { - "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", - "type": "string", - "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", - "minLength": 1, - "maxLength": 63 - }, - "NatEntryView": { - "description": "NAT Record\n\nA NAT record maps an external IP address, used by an instance or externally-facing service like Nexus, to the hosting sled.", + "DiskRuntimeState": { + "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", "type": "object", "properties": { - "deleted": { - "type": "boolean" - }, - "external_address": { - "type": "string", - "format": "ip" - }, - "first_port": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "disk_state": { + "description": "runtime state of the Disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskState" + } + ] }, "gen": { - "type": "integer", - "format": "int64" - }, - "last_port": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "mac": { - "$ref": "#/components/schemas/MacAddr" - }, - "sled_address": { - "type": "string", - "format": "ipv6" - }, - "vni": { - "$ref": "#/components/schemas/Vni" - } - }, - "required": [ - "deleted", - "external_address", - "first_port", - "gen", - "last_port", - "mac", - "sled_address", - "vni" - ] - }, - "NetworkInterface": { - "description": "Information required to construct a virtual network interface", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" + "description": "generation number for this state", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] }, - "ip": { + "time_updated": { + "description": "timestamp for this information", "type": "string", - "format": "ip" - }, - "kind": { - "$ref": "#/components/schemas/NetworkInterfaceKind" - }, - "mac": { - "$ref": "#/components/schemas/MacAddr" - }, - "name": { - "$ref": "#/components/schemas/Name" - }, - "primary": { - "type": "boolean" - }, - "slot": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "subnet": { - "$ref": "#/components/schemas/IpNet" - }, - "transit_ips": { - "default": [], - "type": "array", - "items": { - "$ref": "#/components/schemas/IpNet" - } - }, - "vni": { - "$ref": "#/components/schemas/Vni" + "format": "date-time" } - }, - "required": [ - "id", - "ip", - "kind", - "mac", - "name", - "primary", - "slot", - "subnet", - "vni" + }, + "required": [ + "disk_state", + "gen", + "time_updated" ] }, - "NetworkInterfaceKind": { - "description": "The type of network interface", + "DiskState": { + "description": "State of a Disk", "oneOf": [ { - "description": "A vNIC attached to a guest instance", + "description": "Disk is being initialized", "type": "object", "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { + 
"state": { "type": "string", "enum": [ - "instance" + "creating" ] } }, "required": [ - "id", - "type" + "state" ] }, { - "description": "A vNIC associated with an internal service", + "description": "Disk is ready but detached from any Instance", "type": "object", "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { + "state": { "type": "string", "enum": [ - "service" + "detached" ] } }, "required": [ - "id", - "type" + "state" ] }, { - "description": "A vNIC associated with a probe", + "description": "Disk is ready to receive blocks from an external source", "type": "object", "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { + "state": { "type": "string", "enum": [ - "probe" + "import_ready" ] } }, "required": [ - "id", - "type" + "state" ] - } - ] - }, - "NewPasswordHash": { - "title": "A password hash in PHC string format", - "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", - "type": "string" - }, - "NexusGenerationBumpWaitingOn": { - "oneOf": [ + }, { - "description": "Waiting for the planner to finish updating all non-Nexus zones", + "description": "Disk is importing blocks from a URL", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "found_old_non_nexus_zones" + "importing_from_url" ] } }, "required": [ - "type" + "state" ] }, { - "description": "Waiting for the planner to deploy new-generation Nexus zones", + "description": "Disk is importing blocks from bulk writes", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "missing_new_nexus_in_blueprint" + "importing_from_bulk_writes" ] } }, "required": [ - "type" + "state" ] }, { - "description": "Waiting for `db_metadata_nexus` records to be deployed for new-generation Nexus zones", + "description": "Disk is being finalized to state Detached", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "missing_nexus_database_access_records" + "finalizing" ] } }, "required": [ - "type" + "state" ] }, { - "description": "Waiting for newly deployed Nexus zones to appear to inventory", + "description": "Disk is undergoing maintenance", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "missing_new_nexus_in_inventory" + "maintenance" ] } }, "required": [ - "type" + "state" ] - } - ] - }, - "NodeName": { - "description": "Unique name for a saga [`Node`]\n\nEach node requires a string name that's unique within its DAG. The name is used to identify its output. 
Nodes that depend on a given node (either directly or indirectly) can access the node's output using its name.", - "type": "string" - }, - "OmicronZoneDataset": { - "description": "Describes a persistent ZFS dataset associated with an Omicron zone", - "type": "object", - "properties": { - "pool_name": { - "$ref": "#/components/schemas/ZpoolName" - } - }, - "required": [ - "pool_name" - ] - }, - "OmicronZoneExternalFloatingAddr": { - "description": "Floating external address with port allocated to an Omicron-managed zone.", - "type": "object", - "properties": { - "addr": { - "type": "string" - }, - "id": { - "$ref": "#/components/schemas/TypedUuidForExternalIpKind" - } - }, - "required": [ - "addr", - "id" - ] - }, - "OmicronZoneExternalFloatingIp": { - "description": "Floating external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", - "type": "object", - "properties": { - "id": { - "$ref": "#/components/schemas/TypedUuidForExternalIpKind" - }, - "ip": { - "type": "string", - "format": "ip" - } - }, - "required": [ - "id", - "ip" - ] - }, - "OmicronZoneExternalSnatIp": { - "description": "SNAT (outbound) external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", - "type": "object", - "properties": { - "id": { - "$ref": "#/components/schemas/TypedUuidForExternalIpKind" }, - "snat_cfg": { - "$ref": "#/components/schemas/SourceNatConfig" - } - }, - "required": [ - "id", - "snat_cfg" - ] - }, - "OmicronZoneType": { - "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration", - "oneOf": [ { + "description": "Disk is being attached to the given Instance", "type": "object", "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "nullable": true, - "type": "string" - }, - "nic": { - "description": "The service vNIC providing outbound connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "snat_cfg": { - "description": "The SNAT configuration for outbound connections.", - "allOf": [ - { - "$ref": "#/components/schemas/SourceNatConfig" - } - ] + "instance": { + "type": "string", + "format": "uuid" }, - "type": { + "state": { "type": "string", "enum": [ - "boundary_ntp" + "attaching" ] } }, "required": [ - "address", - "dns_servers", - "nic", - "ntp_servers", - "snat_cfg", - "type" + "instance", + "state" ] }, { - "description": "Type of clickhouse zone used for a single node clickhouse deployment", + "description": "Disk is attached to the given Instance", "type": "object", "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" + "instance": { + "type": "string", + "format": "uuid" }, - "type": { + "state": { "type": "string", "enum": [ - "clickhouse" + "attached" ] } }, "required": [ - "address", - "dataset", - "type" + "instance", + "state" ] }, { - "description": "A zone used to run a Clickhouse Keeper node\n\nKeepers are only used in replicated clickhouse setups", + "description": "Disk is being detached from the 
given Instance", "type": "object", "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" + "instance": { + "type": "string", + "format": "uuid" }, - "type": { + "state": { "type": "string", "enum": [ - "clickhouse_keeper" + "detaching" ] } }, "required": [ - "address", - "dataset", - "type" + "instance", + "state" ] }, { - "description": "A zone used to run a Clickhouse Server in a replicated deployment", + "description": "Disk has been destroyed", "type": "object", "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { + "state": { "type": "string", "enum": [ - "clickhouse_server" + "destroyed" ] } - }, - "required": [ - "address", - "dataset", - "type" + }, + "required": [ + "state" ] }, { + "description": "Disk is unavailable", "type": "object", "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { + "state": { "type": "string", "enum": [ - "cockroach_db" + "faulted" ] } }, "required": [ - "address", - "dataset", - "type" + "state" ] + } + ] + }, + "DnsConfigParams": { + "type": "object", + "properties": { + "generation": { + "$ref": "#/components/schemas/Generation" + }, + "serial": { + "description": "See [`DnsConfig`]'s `serial` field for how this is different from `generation`", + "type": "integer", + "format": "uint32", + "minimum": 0 }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsConfigZone" + } + } + }, + "required": [ + "generation", + "serial", + "time_created", + "zones" + ] + }, + "DnsConfigZone": { + "description": "Configuration for a specific DNS zone, as opposed to illumos zones in which the services described by these records run.\n\nThe name `@` is special: it describes records that should be provided for queries about `zone_name`. This is used in favor of the empty string as `@` is the name used for this purpose in zone files for most DNS configurations. 
It also avoids potentially-confusing debug output from naively printing out records and their names - if you've seen an `@` record and tools are unclear about what that means, hopefully you've arrived here!", + "type": "object", + "properties": { + "records": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsRecord" + } + } + }, + "zone_name": { + "type": "string" + } + }, + "required": [ + "records", + "zone_name" + ] + }, + "DnsRecord": { + "oneOf": [ { "type": "object", "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" + "data": { + "type": "string", + "format": "ipv4" }, "type": { "type": "string", "enum": [ - "crucible" + "A" ] } }, "required": [ - "address", - "dataset", + "data", "type" ] }, { "type": "object", "properties": { - "address": { - "type": "string" + "data": { + "type": "string", + "format": "ipv6" }, "type": { "type": "string", "enum": [ - "crucible_pantry" + "AAAA" ] } }, "required": [ - "address", + "data", "type" ] }, { "type": "object", "properties": { - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "dns_address": { - "description": "The address at which the external DNS server is reachable.", - "type": "string" - }, - "http_address": { - "description": "The address at which the external DNS server API is reachable.", - "type": "string" - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] + "data": { + "$ref": "#/components/schemas/Srv" }, "type": { "type": "string", "enum": [ - "external_dns" + "SRV" ] } }, "required": [ - "dataset", - "dns_address", - "http_address", - "nic", + "data", "type" ] }, { "type": "object", "properties": { - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "dns_address": { - "type": "string" - }, - "gz_address": { - "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", - "type": "string", - "format": "ipv6" - }, - "gz_address_index": { - "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "http_address": { + "data": { "type": "string" }, "type": { "type": "string", "enum": [ - "internal_dns" + "NS" ] } }, "required": [ - "dataset", - "dns_address", - "gz_address", - "gz_address_index", - "http_address", + "data", "type" ] + } + ] + }, + "DownstairsClientStopRequest": { + "type": "object", + "properties": { + "reason": { + "$ref": "#/components/schemas/DownstairsClientStopRequestReason" + }, + "time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "reason", + "time" + ] + }, + "DownstairsClientStopRequestReason": { + "type": "string", + "enum": [ + "replacing", + "disabled", + "failed_reconcile", + "i_o_error", + "bad_negotiation_order", + "incompatible", + "failed_live_repair", + "too_many_outstanding_jobs", + "deactivated" + ] + }, + "DownstairsClientStopped": { + "type": "object", + "properties": { + "reason": { + "$ref": "#/components/schemas/DownstairsClientStoppedReason" + }, + "time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "reason", + 
"time" + ] + }, + "DownstairsClientStoppedReason": { + "type": "string", + "enum": [ + "connection_timeout", + "connection_failed", + "timeout", + "write_failed", + "read_failed", + "requested_stop", + "finished", + "queue_closed", + "receive_task_cancelled" + ] + }, + "DownstairsUnderRepair": { + "type": "object", + "properties": { + "region_uuid": { + "$ref": "#/components/schemas/TypedUuidForDownstairsRegionKind" + }, + "target_addr": { + "type": "string" + } + }, + "required": [ + "region_uuid", + "target_addr" + ] + }, + "Duration": { + "type": "object", + "properties": { + "nanos": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "secs": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "nanos", + "secs" + ] + }, + "Error": { + "description": "Error information from a response.", + "type": "object", + "properties": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "internal_ntp" - ] - } - }, - "required": [ - "address", - "type" - ] + "request_id": { + "type": "string" + } + }, + "required": [ + "message", + "request_id" + ] + }, + "ExpectedActiveRotSlot": { + "description": "Describes the expected active RoT slot, and the version we expect to find for it", + "type": "object", + "properties": { + "slot": { + "$ref": "#/components/schemas/RotSlot" }, + "version": { + "$ref": "#/components/schemas/ArtifactVersion" + } + }, + "required": [ + "slot", + "version" + ] + }, + "ExpectedVersion": { + "description": "Describes the version that we expect to find in some firmware slot", + "oneOf": [ { + "description": "We expect to find _no_ valid caboose in this slot", "type": "object", "properties": { - "external_dns_servers": { - "description": "External DNS servers Nexus can use to resolve external hosts.", - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "external_ip": { - "description": "The address at which the external nexus server is reachable.", - "type": "string", - "format": "ip" - }, - "external_tls": { - "description": "Whether Nexus's external endpoint should use TLS", - "type": "boolean" - }, - "internal_address": { - "description": "The address at which the internal nexus server is reachable.", - "type": "string" - }, - "lockstep_port": { - "description": "The port at which the internal lockstep server is reachable. 
This shares the same IP address with `internal_address`.", - "default": 12232, - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "type": { + "kind": { "type": "string", "enum": [ - "nexus" + "no_valid_version" ] } }, "required": [ - "external_dns_servers", - "external_ip", - "external_tls", - "internal_address", - "nic", - "type" + "kind" ] }, { + "description": "We expect to find the specified version in this slot", "type": "object", "properties": { - "address": { - "type": "string" - }, - "type": { + "kind": { "type": "string", "enum": [ - "oximeter" + "version" ] + }, + "version": { + "$ref": "#/components/schemas/ArtifactVersion" } }, "required": [ - "address", - "type" + "kind", + "version" ] } ] }, - "OximeterInfo": { - "description": "Message used to notify Nexus that this oximeter instance is up and running.", - "type": "object", - "properties": { - "address": { - "description": "The address on which this oximeter instance listens for requests", - "type": "string" - }, - "collector_id": { - "description": "The ID for this oximeter instance.", - "type": "string", - "format": "uuid" - } - }, - "required": [ - "address", - "collector_id" - ] - }, - "OximeterReadMode": { - "description": "Where oximeter should read from", + "ExternalPortDiscovery": { "oneOf": [ { "type": "object", "properties": { - "type": { - "type": "string", - "enum": [ - "single_node" - ] + "auto": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "ipv6" + } } }, "required": [ - "type" - ] + "auto" + ], + "additionalProperties": false }, { "type": "object", "properties": { - "type": { - "type": "string", - "enum": [ - "cluster" - ] + "static": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Name" + } + } } }, "required": [ - "type" - ] + "static" + ], + "additionalProperties": false } ] }, - "OximeterReadPolicy": { + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "IdMapBlueprintDatasetConfig": { "type": "object", - "properties": { - "mode": { - "$ref": "#/components/schemas/OximeterReadMode" - }, - "time_created": { - "type": "string", - "format": "date-time" - }, - "version": { - "type": "integer", - "format": "uint32", - "minimum": 0 - } - }, - "required": [ - "mode", - "time_created", - "version" - ] + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintDatasetConfig" + } }, - "PendingMgsUpdate": { + "IdMapBlueprintPhysicalDiskConfig": { "type": "object", - "properties": { - "artifact_hash": { - "description": "which artifact to apply to this device", - "type": "string", - "format": "hex string (32 bytes)" - }, - "artifact_version": { - "$ref": "#/components/schemas/ArtifactVersion" - }, - "baseboard_id": { - "description": "id of the baseboard that we're going to update", - "allOf": [ - { - "$ref": "#/components/schemas/BaseboardId" - } - ] - }, - "details": { - "description": "component-specific details of the pending update", - "allOf": [ - { - "$ref": "#/components/schemas/PendingMgsUpdateDetails" - } - ] - }, - "slot_id": { - "description": "last known MGS slot (cubby number) of the baseboard", - 
"type": "integer", - "format": "uint16", - "minimum": 0 - }, - "sp_type": { - "description": "what type of baseboard this is", - "allOf": [ - { - "$ref": "#/components/schemas/SpType" - } - ] - } - }, - "required": [ - "artifact_hash", - "artifact_version", - "baseboard_id", - "details", - "slot_id", - "sp_type" - ] + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintPhysicalDiskConfig" + } }, - "PendingMgsUpdateDetails": { - "description": "Describes the component-specific details of a PendingMgsUpdate", + "IdMapBlueprintZoneConfig": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } + }, + "ImportExportPolicy": { + "description": "Define policy relating to the import and export of prefixes from a BGP peer.", "oneOf": [ { - "description": "the SP itself is being updated", + "description": "Do not perform any filtering.", "type": "object", "properties": { - "component": { + "type": { "type": "string", "enum": [ - "sp" - ] - }, - "expected_active_version": { - "description": "expected contents of the active slot", - "allOf": [ - { - "$ref": "#/components/schemas/ArtifactVersion" - } - ] - }, - "expected_inactive_version": { - "description": "expected contents of the inactive slot", - "allOf": [ - { - "$ref": "#/components/schemas/ExpectedVersion" - } + "no_filtering" ] } }, "required": [ - "component", - "expected_active_version", - "expected_inactive_version" + "type" ] }, { - "description": "the RoT is being updated", "type": "object", "properties": { - "component": { + "type": { "type": "string", "enum": [ - "rot" - ] - }, - "expected_active_slot": { - "$ref": "#/components/schemas/ExpectedActiveRotSlot" - }, - "expected_inactive_version": { - "$ref": "#/components/schemas/ExpectedVersion" - }, - "expected_pending_persistent_boot_preference": { - "nullable": true, - "description": "the persistent boot preference written into the CFPA scratch page that will become the persistent boot preference in the authoritative CFPA page upon reboot, unless CFPA update of the authoritative page fails for some reason.", - "allOf": [ - { - "$ref": "#/components/schemas/RotSlot" - } - ] - }, - "expected_persistent_boot_preference": { - "description": "the persistent boot preference written into the current authoritative CFPA page (ping or pong)", - "allOf": [ - { - "$ref": "#/components/schemas/RotSlot" - } + "allow" ] }, - "expected_transient_boot_preference": { - "nullable": true, - "description": "override persistent preference selection for a single boot", - "allOf": [ - { - "$ref": "#/components/schemas/RotSlot" - } - ] + "value": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } } }, "required": [ - "component", - "expected_active_slot", - "expected_inactive_version", - "expected_persistent_boot_preference" + "type", + "value" ] - }, + } + ] + }, + "IpNet": { + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + }, + "oneOf": [ { - "description": "the RoT bootloader is being updated", - "type": "object", - "properties": { - "component": { - "type": "string", - "enum": [ - "rot_bootloader" - ] - }, - "expected_stage0_next_version": { - "description": "expected contents of the stage 0 next", - "allOf": [ - { - "$ref": "#/components/schemas/ExpectedVersion" - } - ] - }, - "expected_stage0_version": { - "description": "expected contents of the stage 0", - "allOf": [ - { - "$ref": "#/components/schemas/ArtifactVersion" - } - ] + "title": "v4", + "allOf": [ + { + "$ref": 
"#/components/schemas/Ipv4Net" } - }, - "required": [ - "component", - "expected_stage0_next_version", - "expected_stage0_version" ] }, { - "description": "the host OS is being updated\n\nWe write the phase 1 via MGS, and have a precheck condition that sled-agent has already written the matching phase 2.", - "type": "object", - "properties": { - "component": { - "type": "string", - "enum": [ - "host_phase1" - ] - }, - "expected_active_phase_1_hash": { - "description": "The hash of the phase 1 slot specified by `expected_active_phase_1_hash`.\n\nWe should always be able to fetch this. Even if the phase 1 contents themselves have been corrupted (very scary for the active slot!), the SP can still hash those contents.", - "type": "string", - "format": "hex string (32 bytes)" - }, - "expected_active_phase_1_slot": { - "description": "Which slot is currently active according to the SP.\n\nThis controls which slot will be used the next time the sled boots; it will _usually_ match `boot_disk`, but differs in the window of time between telling the SP to change which slot to use and the host OS rebooting to actually use that slot.", - "allOf": [ - { - "$ref": "#/components/schemas/M2Slot" - } - ] - }, - "expected_active_phase_2_hash": { - "description": "The hash of the currently-active phase 2 artifact.\n\nIt's possible sled-agent won't be able to report this value, but that would indicate that we don't know the version currently running. The planner wouldn't stage an update without knowing the current version, so if something has gone wrong in the meantime we won't proceede either.", - "type": "string", - "format": "hex string (32 bytes)" - }, - "expected_boot_disk": { - "description": "Which slot the host OS most recently booted from.", - "allOf": [ - { - "$ref": "#/components/schemas/M2Slot" - } - ] - }, - "expected_inactive_phase_1_hash": { - "description": "The hash of the phase 1 slot specified by toggling `expected_active_phase_1_slot` to the other slot.\n\nWe should always be able to fetch this. Even if the phase 1 contents of the inactive slot are entirely bogus, the SP can still hash those contents.", - "type": "string", - "format": "hex string (32 bytes)" - }, - "expected_inactive_phase_2_hash": { - "description": "The hash of the currently-inactive phase 2 artifact.\n\nIt's entirely possible that a sled needing a host OS update has no valid artifact in its inactive slot. 
However, a precondition for us performing a phase 1 update is that `sled-agent` on the target sled has already written the paired phase 2 artifact to the inactive slot; therefore, we don't need to be able to represent an invalid inactive slot.", - "type": "string", - "format": "hex string (32 bytes)" - }, - "sled_agent_address": { - "description": "Address for contacting sled-agent to check phase 2 contents.", - "type": "string" + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" } - }, - "required": [ - "component", - "expected_active_phase_1_hash", - "expected_active_phase_1_slot", - "expected_active_phase_2_hash", - "expected_boot_disk", - "expected_inactive_phase_1_hash", - "expected_inactive_phase_2_hash", - "sled_agent_address" ] } ] }, - "PendingMgsUpdates": { - "type": "object", - "properties": { - "by_baseboard": { - "title": "IdOrdMap", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/PendingMgsUpdate" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, - "type": "array", - "items": { - "$ref": "#/components/schemas/PendingMgsUpdate" - }, - "uniqueItems": true - } - }, - "required": [ - "by_baseboard" - ] - }, - "PendingRecovery": { - "description": "Snapshot of reassignment state when a recovery pass started", - "type": "object", - "properties": { - "blueprint_id": { - "nullable": true, - "description": "which blueprint id we'd be fully caught up to upon completion", + "IpRange": { + "oneOf": [ + { + "title": "v4", "allOf": [ { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + "$ref": "#/components/schemas/Ipv4Range" } ] }, - "generation": { - "description": "what `reassignment_generation` was when this recovery started", + { + "title": "v6", "allOf": [ { - "$ref": "#/components/schemas/Generation" + "$ref": "#/components/schemas/Ipv6Range" } ] } + ] + }, + "Ipv4Net": { + "example": "192.168.1.0/24", + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" + }, + "Ipv4Range": { + "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", + "type": "object", + "properties": { + "first": { + "type": "string", + "format": "ipv4" + }, + "last": { + "type": "string", + "format": "ipv4" + } }, "required": [ - "generation" + "first", + "last" ] }, - "PendingSagaInfo": { - "description": "Describes a pending saga (for debugging why quiesce is stuck)", + "Ipv6Net": { + "example": "fd12:3456::/64", + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": 
"^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + }, + "Ipv6Range": { + "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", "type": "object", "properties": { - "recovered": { - "description": "If true, we know the saga needs to be recovered. It may or may not be running already.\n\nIf false, this saga was created in this Nexus process's lifetime. It's still running.", - "type": "boolean" - }, - "saga_id": { + "first": { "type": "string", - "format": "uuid" - }, - "saga_name": { - "type": "string" + "format": "ipv6" }, - "time_pending": { + "last": { "type": "string", - "format": "date-time" + "format": "ipv6" } }, "required": [ - "recovered", - "saga_id", - "saga_name", - "time_pending" + "first", + "last" ] }, - "PhysicalDiskKind": { - "description": "Describes the form factor of physical disks.", + "KeeperId": { + "description": "A unique ID for a ClickHouse Keeper", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "LldpAdminStatus": { + "description": "To what extent should this port participate in LLDP", "type": "string", "enum": [ - "m2", - "u2" - ] - }, - "PhysicalDiskPath": { - "type": "object", - "properties": { - "disk_id": { - "description": "ID of the physical disk", - "type": "string", - "format": "uuid" - } - }, - "required": [ - "disk_id" + "enabled", + "disabled", + "rx_only", + "tx_only" ] }, - "PhysicalDiskPutRequest": { + "LldpPortConfig": { + "description": "Per-port LLDP configuration settings. Only the \"status\" setting is mandatory. All other fields have natural defaults or may be inherited from the switch.", "type": "object", "properties": { - "id": { - "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" - }, - "model": { - "type": "string" - }, - "serial": { + "chassis_id": { + "nullable": true, + "description": "Chassis ID to advertise. If this is set, it will be advertised as a LocallyAssigned ID type. If this is not set, it will be inherited from the switch-level settings.", "type": "string" }, - "sled_id": { - "type": "string", - "format": "uuid" + "management_addrs": { + "nullable": true, + "description": "Management IP addresses to advertise. If this is not set, it will be inherited from the switch-level settings.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } }, - "variant": { - "$ref": "#/components/schemas/PhysicalDiskKind" + "port_description": { + "nullable": true, + "description": "Port description to advertise. If this is not set, no description will be advertised.", + "type": "string" }, - "vendor": { + "port_id": { + "nullable": true, + "description": "Port ID to advertise. If this is set, it will be advertised as a LocallyAssigned ID type. If this is not set, it will be set to the port name. 
e.g., qsfp0/0.", "type": "string" - } - }, - "required": [ - "id", - "model", - "serial", - "sled_id", - "variant", - "vendor" - ] - }, - "Ping": { - "type": "object", - "properties": { + }, "status": { - "description": "Whether the external API is reachable. Will always be Ok if the endpoint returns anything at all.", + "description": "To what extent should this port participate in LLDP", "allOf": [ { - "$ref": "#/components/schemas/PingStatus" + "$ref": "#/components/schemas/LldpAdminStatus" } ] + }, + "system_description": { + "nullable": true, + "description": "System description to advertise. If this is not set, it will be inherited from the switch-level settings.", + "type": "string" + }, + "system_name": { + "nullable": true, + "description": "System name to advertise. If this is not set, it will be inherited from the switch-level settings.", + "type": "string" } }, "required": [ "status" ] }, - "PingStatus": { + "M2Slot": { + "description": "Describes an M.2 slot, often in the context of writing a system image to it.", "type": "string", "enum": [ - "ok" + "A", + "B" ] }, - "PlannerConfig": { - "type": "object", - "properties": { - "add_zones_with_mupdate_override": { - "description": "Whether to add zones even if a mupdate override is present.\n\nOnce Nexus-driven update is active on a customer system, we must not add new zones while the system is recovering from a MUPdate.\n\nThis setting, which is off by default, allows us to add zones even if we've detected a recent MUPdate on the system.", - "type": "boolean" - } - }, - "required": [ - "add_zones_with_mupdate_override" - ] + "MacAddr": { + "example": "ff:ff:ff:ff:ff:ff", + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "type": "string", + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", + "minLength": 5, + "maxLength": 17 }, - "PlanningAddOutOfEligibleSleds": { - "description": "How many discretionary zones we actually placed out of how many we wanted to place.", + "MigrationRuntimeState": { + "description": "An update from a sled regarding the state of a migration, indicating the role of the VMM whose migration state was updated.", "type": "object", "properties": { - "placed": { - "type": "integer", - "format": "uint", - "minimum": 0 + "gen": { + "$ref": "#/components/schemas/Generation" }, - "wanted_to_place": { - "type": "integer", - "format": "uint", - "minimum": 0 + "migration_id": { + "type": "string", + "format": "uuid" + }, + "state": { + "$ref": "#/components/schemas/MigrationState" + }, + "time_updated": { + "description": "Timestamp for the migration state update.", + "type": "string", + "format": "date-time" } }, "required": [ - "placed", - "wanted_to_place" + "gen", + "migration_id", + "state", + "time_updated" ] }, - "PlanningAddStepReport": { - "type": "object", - "properties": { - "add_update_blocked_reasons": { - "description": "Reasons why zone adds and any updates are blocked.\n\nThis is typically a list of MUPdate-related reasons.", - "type": "array", - "items": { - "type": "string" - } - }, - "add_zones_with_mupdate_override": { - "description": "The value of the homonymous planner config. 
(What this really means is that zone adds happen despite being blocked by one or more MUPdate-related reasons.)", - "type": "boolean" - }, - "discretionary_zones_placed": { - "description": "Sled ID → kinds of discretionary zones placed there", - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DiscretionaryZonePlacement" - } - } - }, - "out_of_eligible_sleds": { - "description": "Discretionary zone kind → (placed, wanted to place)", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/PlanningAddOutOfEligibleSleds" - } - }, - "sleds_getting_ntp_and_discretionary_zones": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - }, - "uniqueItems": true - }, - "sleds_missing_crucible_zone": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TypedUuidForZpoolKind" - } - } - }, - "sleds_missing_ntp_zone": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - }, - "uniqueItems": true - }, - "sleds_waiting_for_ntp_zone": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - }, - "uniqueItems": true - }, - "sleds_without_ntp_zones_in_inventory": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - }, - "uniqueItems": true + "MigrationState": { + "description": "The state of an instance's live migration.", + "oneOf": [ + { + "description": "The migration has not started for this VMM.", + "type": "string", + "enum": [ + "pending" + ] }, - "sleds_without_zpools_for_ntp_zones": { - "type": "array", - "items": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - }, - "uniqueItems": true + { + "description": "The migration is in progress.", + "type": "string", + "enum": [ + "in_progress" + ] }, - "sufficient_zones_exist": { - "description": "Discretionary zone kind → (wanted to place, num existing)", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/PlanningAddSufficientZonesExist" - } + { + "description": "The migration has failed.", + "type": "string", + "enum": [ + "failed" + ] }, - "waiting_on": { - "nullable": true, - "description": "What are we waiting on to start zone additions?", - "allOf": [ - { - "$ref": "#/components/schemas/ZoneAddWaitingOn" - } + { + "description": "The migration has completed.", + "type": "string", + "enum": [ + "completed" ] } - }, - "required": [ - "add_update_blocked_reasons", - "add_zones_with_mupdate_override", - "discretionary_zones_placed", - "out_of_eligible_sleds", - "sleds_getting_ntp_and_discretionary_zones", - "sleds_missing_crucible_zone", - "sleds_missing_ntp_zone", - "sleds_waiting_for_ntp_zone", - "sleds_without_ntp_zones_in_inventory", - "sleds_without_zpools_for_ntp_zones", - "sufficient_zones_exist" ] }, - "PlanningAddSufficientZonesExist": { - "description": "We have at least the minimum required number of zones of a given kind.", + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. 
They can be at most 63 characters long.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", + "minLength": 1, + "maxLength": 63 + }, + "NatEntryView": { + "description": "NAT Record\n\nA NAT record maps an external IP address, used by an instance or externally-facing service like Nexus, to the hosting sled.", "type": "object", "properties": { - "num_existing": { + "deleted": { + "type": "boolean" + }, + "external_address": { + "type": "string", + "format": "ip" + }, + "first_port": { "type": "integer", - "format": "uint", + "format": "uint16", "minimum": 0 }, - "target_count": { + "gen": { "type": "integer", - "format": "uint", + "format": "int64" + }, + "last_port": { + "type": "integer", + "format": "uint16", "minimum": 0 + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "sled_address": { + "type": "string", + "format": "ipv6" + }, + "vni": { + "$ref": "#/components/schemas/Vni" } }, "required": [ - "num_existing", - "target_count" - ] - }, - "PlanningCockroachdbSettingsStepReport": { - "type": "object", - "properties": { - "preserve_downgrade": { - "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" - } - }, - "required": [ - "preserve_downgrade" + "deleted", + "external_address", + "first_port", + "gen", + "last_port", + "mac", + "sled_address", + "vni" ] }, - "PlanningDecommissionStepReport": { + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", "type": "object", "properties": { - "zombie_sleds": { - "description": "Decommissioned sleds that unexpectedly appeared as commissioned.", + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "subnet": { + "$ref": "#/components/schemas/IpNet" + }, + "transit_ips": { + "default": [], "type": "array", "items": { - "$ref": "#/components/schemas/TypedUuidForSledKind" + "$ref": "#/components/schemas/IpNet" } + }, + "vni": { + "$ref": "#/components/schemas/Vni" } }, "required": [ - "zombie_sleds" + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" ] }, - "PlanningExpungeStepReport": { - "type": "object", - "properties": { - "orphan_disks": { - "description": "Expunged disks not present in the parent blueprint.", + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" - } + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + 
"enum": [ + "probe" + ] + } + }, + "required": [ + "id", + "type" + ] } - }, - "required": [ - "orphan_disks" ] }, - "PlanningMgsUpdatesStepReport": { - "type": "object", - "properties": { - "pending_mgs_updates": { - "$ref": "#/components/schemas/PendingMgsUpdates" - } - }, - "required": [ - "pending_mgs_updates" - ] + "NewPasswordHash": { + "title": "A password hash in PHC string format", + "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", + "type": "string" }, - "PlanningNexusGenerationBumpReport": { + "NexusGenerationBumpWaitingOn": { "oneOf": [ { - "description": "We have no reason to bump the Nexus generation number.", + "description": "Waiting for the planner to finish updating all non-Nexus zones", "type": "object", "properties": { - "component": { + "type": { "type": "string", "enum": [ - "nothing_to_report" + "found_old_non_nexus_zones" ] } }, "required": [ - "component" + "type" ] }, { - "description": "We are waiting on some condition before we can bump the Nexus generation.", + "description": "Waiting for the planner to deploy new-generation Nexus zones", "type": "object", "properties": { - "component": { + "type": { + "type": "string", + "enum": [ + "missing_new_nexus_in_blueprint" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for `db_metadata_nexus` records to be deployed for new-generation Nexus zones", + "type": "object", + "properties": { + "type": { "type": "string", "enum": [ - "waiting_on" + "missing_nexus_database_access_records" ] - }, - "value": { - "$ref": "#/components/schemas/NexusGenerationBumpWaitingOn" } }, "required": [ - "component", - "value" + "type" ] }, { - "description": "We are bumping the Nexus generation number to this value.", + "description": "Waiting for newly deployed Nexus zones to appear to inventory", "type": "object", "properties": { - "component": { + "type": { "type": "string", "enum": [ - "bumping_generation" + "missing_new_nexus_in_inventory" ] - }, - "value": { - "$ref": "#/components/schemas/Generation" } }, "required": [ - "component", - "value" + "type" ] } ] }, - "PlanningNoopImageSourceConverted": { - "description": "How many of the total install-dataset zones and/or host phase 2 slots were noop-converted to use the artifact store on a particular sled.", + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", "type": "object", "properties": { - "host_phase_2_slot_a_eligible": { - "type": "boolean" + "pool_name": { + "$ref": "#/components/schemas/ZpoolName" + } + }, + "required": [ + "pool_name" + ] + }, + "OmicronZoneExternalFloatingAddr": { + "description": "Floating external address with port allocated to an Omicron-managed zone.", + "type": "object", + "properties": { + "addr": { + "type": "string" }, - "host_phase_2_slot_b_eligible": { - "type": "boolean" + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" + } + }, + "required": [ + "addr", + "id" + ] + }, + "OmicronZoneExternalFloatingIp": { + "description": "Floating external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" }, - "num_dataset": { - "type": 
"integer", - "format": "uint", - "minimum": 0 + "ip": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "id", + "ip" + ] + }, + "OmicronZoneExternalSnatIp": { + "description": "SNAT (outbound) external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" }, - "num_eligible": { - "type": "integer", - "format": "uint", - "minimum": 0 + "snat_cfg": { + "$ref": "#/components/schemas/SourceNatConfig" } }, "required": [ - "host_phase_2_slot_a_eligible", - "host_phase_2_slot_b_eligible", - "num_dataset", - "num_eligible" + "id", + "snat_cfg" ] }, - "PlanningNoopImageSourceSkipSledHostPhase2Reason": { + "OximeterInfo": { + "description": "Message used to notify Nexus that this oximeter instance is up and running.", + "type": "object", + "properties": { + "address": { + "description": "The address on which this oximeter instance listens for requests", + "type": "string" + }, + "collector_id": { + "description": "The ID for this oximeter instance.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "address", + "collector_id" + ] + }, + "OximeterReadMode": { + "description": "Where oximeter should read from", "oneOf": [ { "type": "object", @@ -7284,7 +3829,7 @@ "type": { "type": "string", "enum": [ - "both_slots_already_artifact" + "single_node" ] } }, @@ -7298,7 +3843,7 @@ "type": { "type": "string", "enum": [ - "sled_not_in_inventory" + "cluster" ] } }, @@ -7308,1545 +3853,1538 @@ } ] }, - "PlanningNoopImageSourceSkipSledZonesReason": { - "oneOf": [ - { - "type": "object", - "properties": { - "num_total": { - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "all_zones_already_artifact" - ] + "PendingMgsUpdate": { + "type": "object", + "properties": { + "artifact_hash": { + "description": "which artifact to apply to this device", + "type": "string", + "format": "hex string (32 bytes)" + }, + "artifact_version": { + "$ref": "#/components/schemas/ArtifactVersion" + }, + "baseboard_id": { + "description": "id of the baseboard that we're going to update", + "allOf": [ + { + "$ref": "#/components/schemas/BaseboardId" } - }, - "required": [ - "num_total", - "type" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "sled_not_in_inventory" - ] + "details": { + "description": "component-specific details of the pending update", + "allOf": [ + { + "$ref": "#/components/schemas/PendingMgsUpdateDetails" } - }, - "required": [ - "type" ] }, + "slot_id": { + "description": "last known MGS slot (cubby number) of the baseboard", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "sp_type": { + "description": "what type of baseboard this is", + "allOf": [ + { + "$ref": "#/components/schemas/SpType" + } + ] + } + }, + "required": [ + "artifact_hash", + "artifact_version", + "baseboard_id", + "details", + "slot_id", + "sp_type" + ] + }, + "PendingMgsUpdateDetails": { + "description": "Describes the component-specific details of a PendingMgsUpdate", + "oneOf": [ { + "description": "the SP itself is being updated", "type": "object", "properties": { - "error": { - "type": "string" - }, - "type": { + "component": { "type": "string", "enum": [ - "error_retrieving_zone_manifest" + "sp" + ] + }, + "expected_active_version": { + 
"description": "expected contents of the active slot", + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactVersion" + } + ] + }, + "expected_inactive_version": { + "description": "expected contents of the inactive slot", + "allOf": [ + { + "$ref": "#/components/schemas/ExpectedVersion" + } ] } }, "required": [ - "error", - "type" + "component", + "expected_active_version", + "expected_inactive_version" ] }, { + "description": "the RoT is being updated", "type": "object", "properties": { - "id": { - "$ref": "#/components/schemas/TypedUuidForMupdateOverrideKind" - }, - "type": { + "component": { "type": "string", "enum": [ - "remove_mupdate_override" + "rot" + ] + }, + "expected_active_slot": { + "$ref": "#/components/schemas/ExpectedActiveRotSlot" + }, + "expected_inactive_version": { + "$ref": "#/components/schemas/ExpectedVersion" + }, + "expected_pending_persistent_boot_preference": { + "nullable": true, + "description": "the persistent boot preference written into the CFPA scratch page that will become the persistent boot preference in the authoritative CFPA page upon reboot, unless CFPA update of the authoritative page fails for some reason.", + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } + ] + }, + "expected_persistent_boot_preference": { + "description": "the persistent boot preference written into the current authoritative CFPA page (ping or pong)", + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } + ] + }, + "expected_transient_boot_preference": { + "nullable": true, + "description": "override persistent preference selection for a single boot", + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } ] } }, "required": [ - "id", - "type" + "component", + "expected_active_slot", + "expected_inactive_version", + "expected_persistent_boot_preference" ] - } - ] - }, - "PlanningNoopImageSourceSkipZoneReason": { - "oneOf": [ + }, { + "description": "the RoT bootloader is being updated", "type": "object", "properties": { - "file_name": { - "type": "string" - }, - "type": { + "component": { "type": "string", "enum": [ - "zone_not_in_manifest" + "rot_bootloader" ] }, - "zone_kind": { - "type": "string" + "expected_stage0_next_version": { + "description": "expected contents of the stage 0 next", + "allOf": [ + { + "$ref": "#/components/schemas/ExpectedVersion" + } + ] + }, + "expected_stage0_version": { + "description": "expected contents of the stage 0", + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactVersion" + } + ] } }, "required": [ - "file_name", - "type", - "zone_kind" + "component", + "expected_stage0_next_version", + "expected_stage0_version" ] }, { + "description": "the host OS is being updated\n\nWe write the phase 1 via MGS, and have a precheck condition that sled-agent has already written the matching phase 2.", "type": "object", "properties": { - "error": { - "type": "string" - }, - "file_name": { - "type": "string" - }, - "type": { + "component": { "type": "string", "enum": [ - "invalid_artifact" + "host_phase1" ] }, - "zone_kind": { - "type": "string" - } - }, - "required": [ - "error", - "file_name", - "type", - "zone_kind" - ] - }, - { - "type": "object", - "properties": { - "artifact_hash": { + "expected_active_phase_1_hash": { + "description": "The hash of the phase 1 slot specified by `expected_active_phase_1_hash`.\n\nWe should always be able to fetch this. 
Even if the phase 1 contents themselves have been corrupted (very scary for the active slot!), the SP can still hash those contents.",
                "type": "string",
                "format": "hex string (32 bytes)"
              },
-              "file_name": {
-                "type": "string"
+              "expected_active_phase_1_slot": {
+                "description": "Which slot is currently active according to the SP.\n\nThis controls which slot will be used the next time the sled boots; it will _usually_ match `boot_disk`, but differs in the window of time between telling the SP to change which slot to use and the host OS rebooting to actually use that slot.",
+                "allOf": [
+                  {
+                    "$ref": "#/components/schemas/M2Slot"
+                  }
+                ]
              },
-              "type": {
+              "expected_active_phase_2_hash": {
+                "description": "The hash of the currently-active phase 2 artifact.\n\nIt's possible sled-agent won't be able to report this value, but that would indicate that we don't know the version currently running. The planner wouldn't stage an update without knowing the current version, so if something has gone wrong in the meantime we won't proceed either.",
                "type": "string",
-                "enum": [
-                  "artifact_not_in_repo"
+                "format": "hex string (32 bytes)"
+              },
+              "expected_boot_disk": {
+                "description": "Which slot the host OS most recently booted from.",
+                "allOf": [
+                  {
+                    "$ref": "#/components/schemas/M2Slot"
+                  }
                ]
              },
-              "zone_kind": {
+              "expected_inactive_phase_1_hash": {
+                "description": "The hash of the phase 1 slot specified by toggling `expected_active_phase_1_slot` to the other slot.\n\nWe should always be able to fetch this. Even if the phase 1 contents of the inactive slot are entirely bogus, the SP can still hash those contents.",
+                "type": "string",
+                "format": "hex string (32 bytes)"
+              },
+              "expected_inactive_phase_2_hash": {
+                "description": "The hash of the currently-inactive phase 2 artifact.\n\nIt's entirely possible that a sled needing a host OS update has no valid artifact in its inactive slot. 
However, a precondition for us performing a phase 1 update is that `sled-agent` on the target sled has already written the paired phase 2 artifact to the inactive slot; therefore, we don't need to be able to represent an invalid inactive slot.", + "type": "string", + "format": "hex string (32 bytes)" + }, + "sled_agent_address": { + "description": "Address for contacting sled-agent to check phase 2 contents.", "type": "string" } }, "required": [ - "artifact_hash", - "file_name", - "type", - "zone_kind" + "component", + "expected_active_phase_1_hash", + "expected_active_phase_1_slot", + "expected_active_phase_2_hash", + "expected_boot_disk", + "expected_inactive_phase_1_hash", + "expected_inactive_phase_2_hash", + "sled_agent_address" ] } ] }, - "PlanningNoopImageSourceStepReport": { + "PendingMgsUpdates": { "type": "object", "properties": { - "converted": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/PlanningNoopImageSourceConverted" - } + "by_baseboard": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/PendingMgsUpdate" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/PendingMgsUpdate" + }, + "uniqueItems": true + } + }, + "required": [ + "by_baseboard" + ] + }, + "PhysicalDiskKind": { + "description": "Describes the form factor of physical disks.", + "type": "string", + "enum": [ + "m2", + "u2" + ] + }, + "PhysicalDiskPutRequest": { + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" }, - "no_target_release": { - "type": "boolean" + "model": { + "type": "string" }, - "skipped_sled_host_phase_2": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/PlanningNoopImageSourceSkipSledHostPhase2Reason" - } + "serial": { + "type": "string" }, - "skipped_sled_zones": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/PlanningNoopImageSourceSkipSledZonesReason" - } + "sled_id": { + "type": "string", + "format": "uuid" }, - "skipped_zones": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/PlanningNoopImageSourceSkipZoneReason" - } + "variant": { + "$ref": "#/components/schemas/PhysicalDiskKind" + }, + "vendor": { + "type": "string" } }, "required": [ - "converted", - "no_target_release", - "skipped_sled_host_phase_2", - "skipped_sled_zones", - "skipped_zones" + "id", + "model", + "serial", + "sled_id", + "variant", + "vendor" ] }, - "PlanningOutOfDateZone": { - "description": "We have at least the minimum required number of zones of a given kind.", + "Ping": { "type": "object", "properties": { - "desired_image_source": { - "$ref": "#/components/schemas/BlueprintZoneImageSource" + "status": { + "description": "Whether the external API is reachable. 
Will always be Ok if the endpoint returns anything at all.", + "allOf": [ + { + "$ref": "#/components/schemas/PingStatus" + } + ] + } + }, + "required": [ + "status" + ] + }, + "PingStatus": { + "type": "string", + "enum": [ + "ok" + ] + }, + "PlannerConfig": { + "type": "object", + "properties": { + "add_zones_with_mupdate_override": { + "description": "Whether to add zones even if a mupdate override is present.\n\nOnce Nexus-driven update is active on a customer system, we must not add new zones while the system is recovering from a MUPdate.\n\nThis setting, which is off by default, allows us to add zones even if we've detected a recent MUPdate on the system.", + "type": "boolean" + } + }, + "required": [ + "add_zones_with_mupdate_override" + ] + }, + "PlanningAddOutOfEligibleSleds": { + "description": "How many discretionary zones we actually placed out of how many we wanted to place.", + "type": "object", + "properties": { + "placed": { + "type": "integer", + "format": "uint", + "minimum": 0 }, - "zone_config": { - "$ref": "#/components/schemas/BlueprintZoneConfig" + "wanted_to_place": { + "type": "integer", + "format": "uint", + "minimum": 0 } }, "required": [ - "desired_image_source", - "zone_config" + "placed", + "wanted_to_place" ] }, - "PlanningZoneUpdatesStepReport": { + "PlanningAddStepReport": { "type": "object", "properties": { - "expunged_zones": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/BlueprintZoneConfig" - } + "add_update_blocked_reasons": { + "description": "Reasons why zone adds and any updates are blocked.\n\nThis is typically a list of MUPdate-related reasons.", + "type": "array", + "items": { + "type": "string" } }, - "out_of_date_zones": { + "add_zones_with_mupdate_override": { + "description": "The value of the homonymous planner config. 
(What this really means is that zone adds happen despite being blocked by one or more MUPdate-related reasons.)", + "type": "boolean" + }, + "discretionary_zones_placed": { + "description": "Sled ID → kinds of discretionary zones placed there", "type": "object", "additionalProperties": { "type": "array", "items": { - "$ref": "#/components/schemas/PlanningOutOfDateZone" + "$ref": "#/components/schemas/DiscretionaryZonePlacement" } } }, - "unsafe_zones": { + "out_of_eligible_sleds": { + "description": "Discretionary zone kind → (placed, wanted to place)", "type": "object", "additionalProperties": { - "$ref": "#/components/schemas/ZoneUnsafeToShutdown" + "$ref": "#/components/schemas/PlanningAddOutOfEligibleSleds" } }, - "updated_zones": { + "sleds_getting_ntp_and_discretionary_zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true + }, + "sleds_missing_crucible_zone": { "type": "object", "additionalProperties": { "type": "array", "items": { - "$ref": "#/components/schemas/BlueprintZoneConfig" + "$ref": "#/components/schemas/TypedUuidForZpoolKind" } } }, - "waiting_on": { - "nullable": true, - "description": "What are we waiting on to start zone updates?", - "allOf": [ - { - "$ref": "#/components/schemas/ZoneUpdatesWaitingOn" - } - ] - }, - "waiting_zones": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ZoneWaitingToExpunge" - } - } - }, - "required": [ - "expunged_zones", - "out_of_date_zones", - "unsafe_zones", - "updated_zones", - "waiting_zones" - ] - }, - "PortConfigV2": { - "type": "object", - "properties": { - "addresses": { - "description": "This port's addresses and optional vlan IDs", + "sleds_missing_ntp_zone": { "type": "array", "items": { - "$ref": "#/components/schemas/UplinkAddressConfig" - } - }, - "autoneg": { - "description": "Whether or not to set autonegotiation", - "default": false, - "type": "boolean" + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true }, - "bgp_peers": { - "description": "BGP peers on this port", + "sleds_waiting_for_ntp_zone": { "type": "array", "items": { - "$ref": "#/components/schemas/BgpPeerConfig" - } - }, - "lldp": { - "nullable": true, - "description": "LLDP configuration for this port", - "allOf": [ - { - "$ref": "#/components/schemas/LldpPortConfig" - } - ] - }, - "port": { - "description": "Nmae of the port this config applies to.", - "type": "string" + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true }, - "routes": { - "description": "The set of routes associated with this port.", + "sleds_without_ntp_zones_in_inventory": { "type": "array", "items": { - "$ref": "#/components/schemas/RouteConfig" - } + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true }, - "switch": { - "description": "Switch the port belongs to.", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchLocation" - } - ] + "sleds_without_zpools_for_ntp_zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true }, - "tx_eq": { - "nullable": true, - "description": "TX-EQ configuration for this port", - "allOf": [ - { - "$ref": "#/components/schemas/TxEqConfig" - } - ] + "sufficient_zones_exist": { + "description": "Discretionary zone kind → (wanted to place, num existing)", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningAddSufficientZonesExist" + } }, - "uplink_port_fec": { + 
"waiting_on": { "nullable": true, - "description": "Port forward error correction type.", - "allOf": [ - { - "$ref": "#/components/schemas/PortFec" - } - ] - }, - "uplink_port_speed": { - "description": "Port speed.", + "description": "What are we waiting on to start zone additions?", "allOf": [ { - "$ref": "#/components/schemas/PortSpeed" + "$ref": "#/components/schemas/ZoneAddWaitingOn" } ] } }, "required": [ - "addresses", - "bgp_peers", - "port", - "routes", - "switch", - "uplink_port_speed" - ] - }, - "PortFec": { - "description": "Switchport FEC options", - "type": "string", - "enum": [ - "firecode", - "none", - "rs" - ] - }, - "PortSpeed": { - "description": "Switchport Speed options", - "type": "string", - "enum": [ - "speed0_g", - "speed1_g", - "speed10_g", - "speed25_g", - "speed40_g", - "speed50_g", - "speed100_g", - "speed200_g", - "speed400_g" + "add_update_blocked_reasons", + "add_zones_with_mupdate_override", + "discretionary_zones_placed", + "out_of_eligible_sleds", + "sleds_getting_ntp_and_discretionary_zones", + "sleds_missing_crucible_zone", + "sleds_missing_ntp_zone", + "sleds_waiting_for_ntp_zone", + "sleds_without_ntp_zones_in_inventory", + "sleds_without_zpools_for_ntp_zones", + "sufficient_zones_exist" ] }, - "ProbeExternalIp": { + "PlanningAddSufficientZonesExist": { + "description": "We have at least the minimum required number of zones of a given kind.", "type": "object", "properties": { - "first_port": { + "num_existing": { "type": "integer", - "format": "uint16", + "format": "uint", "minimum": 0 }, - "ip": { - "type": "string", - "format": "ip" - }, - "kind": { - "$ref": "#/components/schemas/ProbeExternalIpKind" - }, - "last_port": { + "target_count": { "type": "integer", - "format": "uint16", + "format": "uint", "minimum": 0 } }, "required": [ - "first_port", - "ip", - "kind", - "last_port" - ] - }, - "ProbeExternalIpKind": { - "type": "string", - "enum": [ - "snat", - "floating", - "ephemeral" + "num_existing", + "target_count" ] }, - "ProbeInfo": { + "PlanningCockroachdbSettingsStepReport": { "type": "object", "properties": { - "external_ips": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ProbeExternalIp" - } - }, - "id": { - "type": "string", - "format": "uuid" - }, - "interface": { - "$ref": "#/components/schemas/NetworkInterface" - }, - "name": { - "$ref": "#/components/schemas/Name" - }, - "sled": { - "type": "string", - "format": "uuid" + "preserve_downgrade": { + "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" + } + }, + "required": [ + "preserve_downgrade" + ] + }, + "PlanningDecommissionStepReport": { + "type": "object", + "properties": { + "zombie_sleds": { + "description": "Decommissioned sleds that unexpectedly appeared as commissioned.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } } }, "required": [ - "external_ips", - "id", - "interface", - "name", - "sled" + "zombie_sleds" ] }, - "ProducerEndpoint": { - "description": "Information announced by a metric server, used so that clients can contact it and collect available metric data from it.", + "PlanningExpungeStepReport": { "type": "object", "properties": { - "address": { - "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", - "type": "string" - }, - "id": { - "description": "A unique ID for this producer.", - "type": "string", - "format": "uuid" - }, - "interval": { - "description": "The interval on which `oximeter` should collect metrics.", - "allOf": [ - { - "$ref": 
"#/components/schemas/Duration" - } - ] - }, - "kind": { - "description": "The kind of producer.", - "allOf": [ - { - "$ref": "#/components/schemas/ProducerKind" - } - ] + "orphan_disks": { + "description": "Expunged disks not present in the parent blueprint.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" + } } }, "required": [ - "address", - "id", - "interval", - "kind" + "orphan_disks" ] }, - "ProducerEndpointResultsPage": { - "description": "A single page of results", + "PlanningMgsUpdatesStepReport": { "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/ProducerEndpoint" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + "pending_mgs_updates": { + "$ref": "#/components/schemas/PendingMgsUpdates" } }, "required": [ - "items" + "pending_mgs_updates" ] }, - "ProducerKind": { - "description": "The kind of metric producer this is.", + "PlanningNexusGenerationBumpReport": { "oneOf": [ { - "description": "The producer is a sled-agent.", - "type": "string", - "enum": [ - "sled_agent" - ] - }, - { - "description": "The producer is an Omicron-managed service.", - "type": "string", - "enum": [ - "service" + "description": "We have no reason to bump the Nexus generation number.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "nothing_to_report" + ] + } + }, + "required": [ + "component" ] }, { - "description": "The producer is a Propolis VMM managing a guest instance.", - "type": "string", - "enum": [ - "instance" + "description": "We are waiting on some condition before we can bump the Nexus generation.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "waiting_on" + ] + }, + "value": { + "$ref": "#/components/schemas/NexusGenerationBumpWaitingOn" + } + }, + "required": [ + "component", + "value" ] }, { - "description": "The producer is a management gateway service.", - "type": "string", - "enum": [ - "management_gateway" + "description": "We are bumping the Nexus generation number to this value.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "bumping_generation" + ] + }, + "value": { + "$ref": "#/components/schemas/Generation" + } + }, + "required": [ + "component", + "value" ] } ] }, - "ProducerRegistrationResponse": { - "description": "Response to a successful producer registration.", + "PlanningNoopImageSourceConverted": { + "description": "How many of the total install-dataset zones and/or host phase 2 slots were noop-converted to use the artifact store on a particular sled.", "type": "object", "properties": { - "lease_duration": { - "description": "Period within which producers must renew their lease.\n\nProducers are required to periodically re-register with Nexus, to ensure that they are still collected from by `oximeter`.", - "allOf": [ - { - "$ref": "#/components/schemas/Duration" - } - ] + "host_phase_2_slot_a_eligible": { + "type": "boolean" + }, + "host_phase_2_slot_b_eligible": { + "type": "boolean" + }, + "num_dataset": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "num_eligible": { + "type": "integer", + "format": "uint", + "minimum": 0 } }, "required": [ - "lease_duration" + "host_phase_2_slot_a_eligible", + "host_phase_2_slot_b_eligible", + "num_dataset", + 
"num_eligible" ] }, - "QuiesceState": { - "description": "See [`QuiesceStatus`] for more on Nexus quiescing.\n\nAt any given time, Nexus is always in one of these states:\n\n```text Undetermined (have not loaded persistent state; don't know yet) | | load persistent state and find we're not quiescing v Running (normal operation) | | quiesce starts v DrainingSagas (no new sagas are allowed, but some are still running) | | no more sagas running v DrainingDb (no sagas running; no new db connections may be | acquired by Nexus at-large, but some are still held) | | no more database connections held v RecordingQuiesce (everything is quiesced aside from one connection being | used to record our final quiesced state) | | finish recording quiesce state in database v Quiesced (no sagas running, no database connections in use) ```\n\nQuiescing is (currently) a one-way trip: once a Nexus process starts quiescing, it will never go back to normal operation. It will never go back to an earlier stage, either.", + "PlanningNoopImageSourceSkipSledHostPhase2Reason": { "oneOf": [ { - "description": "We have not yet determined based on persistent state if we're supposed to be quiesced or not", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "undetermined" + "both_slots_already_artifact" ] } }, "required": [ - "state" + "type" ] }, { - "description": "Normal operation", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "running" + "sled_not_in_inventory" ] } }, "required": [ - "state" + "type" + ] + } + ] + }, + "PlanningNoopImageSourceSkipSledZonesReason": { + "oneOf": [ + { + "type": "object", + "properties": { + "num_total": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "all_zones_already_artifact" + ] + } + }, + "required": [ + "num_total", + "type" ] }, { - "description": "New sagas disallowed, but some are still running on some Nexus instances", "type": "object", "properties": { - "quiesce_details": { - "type": "object", - "properties": { - "time_requested": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "time_requested" + "type": { + "type": "string", + "enum": [ + "sled_not_in_inventory" ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "error": { + "type": "string" }, - "state": { + "type": { "type": "string", "enum": [ - "draining_sagas" + "error_retrieving_zone_manifest" ] } }, "required": [ - "quiesce_details", - "state" + "error", + "type" ] }, { - "description": "No sagas running on any Nexus instances\n\nNo new database connections may be claimed, but some database connections are still held.", "type": "object", "properties": { - "quiesce_details": { - "type": "object", - "properties": { - "duration_draining_sagas": { - "$ref": "#/components/schemas/Duration" - }, - "time_requested": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "duration_draining_sagas", - "time_requested" + "id": { + "$ref": "#/components/schemas/TypedUuidForMupdateOverrideKind" + }, + "type": { + "type": "string", + "enum": [ + "remove_mupdate_override" ] + } + }, + "required": [ + "id", + "type" + ] + } + ] + }, + "PlanningNoopImageSourceSkipZoneReason": { + "oneOf": [ + { + "type": "object", + "properties": { + "file_name": { + "type": "string" }, - "state": { + "type": { "type": "string", "enum": [ - "draining_db" + "zone_not_in_manifest" ] + }, + "zone_kind": { + "type": "string" } }, 
"required": [ - "quiesce_details", - "state" + "file_name", + "type", + "zone_kind" ] }, { - "description": "No database connections in use except to record the final \"quiesced\" state", "type": "object", "properties": { - "quiesce_details": { - "type": "object", - "properties": { - "duration_draining_db": { - "$ref": "#/components/schemas/Duration" - }, - "duration_draining_sagas": { - "$ref": "#/components/schemas/Duration" - }, - "time_requested": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "duration_draining_db", - "duration_draining_sagas", - "time_requested" - ] + "error": { + "type": "string" }, - "state": { + "file_name": { + "type": "string" + }, + "type": { "type": "string", "enum": [ - "recording_quiesce" + "invalid_artifact" ] + }, + "zone_kind": { + "type": "string" } }, "required": [ - "quiesce_details", - "state" + "error", + "file_name", + "type", + "zone_kind" ] }, { - "description": "Nexus has no sagas running and is not using the database", "type": "object", "properties": { - "quiesce_details": { - "type": "object", - "properties": { - "duration_draining_db": { - "$ref": "#/components/schemas/Duration" - }, - "duration_draining_sagas": { - "$ref": "#/components/schemas/Duration" - }, - "duration_recording_quiesce": { - "$ref": "#/components/schemas/Duration" - }, - "duration_total": { - "$ref": "#/components/schemas/Duration" - }, - "time_quiesced": { - "type": "string", - "format": "date-time" - }, - "time_requested": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "duration_draining_db", - "duration_draining_sagas", - "duration_recording_quiesce", - "duration_total", - "time_quiesced", - "time_requested" - ] + "artifact_hash": { + "type": "string", + "format": "hex string (32 bytes)" }, - "state": { + "file_name": { + "type": "string" + }, + "type": { "type": "string", "enum": [ - "quiesced" + "artifact_not_in_repo" ] + }, + "zone_kind": { + "type": "string" } }, "required": [ - "quiesce_details", - "state" - ] - } - ] - }, - "QuiesceStatus": { - "description": "Describes whether Nexus is quiescing or quiesced and what, if anything, is blocking the quiesce process\n\n**Quiescing** is the process of draining Nexus of running sagas and stopping all use of the database in preparation for upgrade. 
See [`QuiesceState`] for more on the stages involved.", - "type": "object", - "properties": { - "db_claims": { - "title": "IdOrdMap", - "description": "what database claims are currently held (by any part of Nexus)\n\nEntries here prevent transitioning from `WaitingForDb` to `Quiesced`.", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/HeldDbClaimInfo" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, - "type": "array", - "items": { - "$ref": "#/components/schemas/HeldDbClaimInfo" - }, - "uniqueItems": true - }, - "sagas": { - "description": "information about saga quiescing", - "allOf": [ - { - "$ref": "#/components/schemas/SagaQuiesceStatus" - } - ] - }, - "state": { - "description": "what stage of quiescing is Nexus at", - "allOf": [ - { - "$ref": "#/components/schemas/QuiesceState" - } - ] - } - }, - "required": [ - "db_claims", - "sagas", - "state" - ] - }, - "RackInitializationRequest": { - "type": "object", - "properties": { - "allowed_source_ips": { - "description": "IPs or subnets allowed to make requests to user-facing services", - "allOf": [ - { - "$ref": "#/components/schemas/AllowedSourceIps" - } - ] - }, - "blueprint": { - "description": "Blueprint describing services initialized by RSS.", - "allOf": [ - { - "$ref": "#/components/schemas/Blueprint" - } - ] - }, - "certs": { - "description": "x.509 Certificates used to encrypt communication with the external API.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Certificate" - } - }, - "crucible_datasets": { - "description": "Crucible datasets on the rack which have been provisioned by RSS.", - "type": "array", - "items": { - "$ref": "#/components/schemas/CrucibleDatasetCreateRequest" - } - }, - "external_dns_zone_name": { - "description": "delegated DNS name for external DNS", - "type": "string" - }, - "external_port_count": { - "description": "The external qsfp ports per sidecar", - "allOf": [ - { - "$ref": "#/components/schemas/ExternalPortDiscovery" - } - ] - }, - "internal_dns_zone_config": { - "description": "initial internal DNS config", - "allOf": [ - { - "$ref": "#/components/schemas/DnsConfigParams" - } - ] - }, - "internal_services_ip_pool_ranges": { - "description": "Ranges of the service IP pool which may be used for internal services, such as Nexus.", - "type": "array", - "items": { - "$ref": "#/components/schemas/IpRange" - } - }, - "physical_disks": { - "description": "\"Managed\" physical disks owned by the control plane", - "type": "array", - "items": { - "$ref": "#/components/schemas/PhysicalDiskPutRequest" - } - }, - "rack_network_config": { - "description": "Initial rack network configuration", - "allOf": [ - { - "$ref": "#/components/schemas/RackNetworkConfigV2" - } - ] - }, - "recovery_silo": { - "description": "configuration for the initial (recovery) Silo", - "allOf": [ - { - "$ref": "#/components/schemas/RecoverySiloConfig" - } + "artifact_hash", + "file_name", + "type", + "zone_kind" ] - }, - "zpools": { - "description": "Zpools created within the physical disks created by the control plane.", - "type": "array", - "items": { - "$ref": "#/components/schemas/ZpoolPutRequest" - } } - }, - "required": [ - "allowed_source_ips", - "blueprint", - "certs", - "crucible_datasets", - "external_dns_zone_name", - "external_port_count", - "internal_dns_zone_config", - "internal_services_ip_pool_ranges", - "physical_disks", - "rack_network_config", - "recovery_silo", - "zpools" ] }, - "RackNetworkConfigV2": { - "description": "Initial network 
configuration", + "PlanningNoopImageSourceStepReport": { "type": "object", "properties": { - "bfd": { - "description": "BFD configuration for connecting the rack to external networks", - "default": [], - "type": "array", - "items": { - "$ref": "#/components/schemas/BfdPeerConfig" - } - }, - "bgp": { - "description": "BGP configurations for connecting the rack to external networks", - "type": "array", - "items": { - "$ref": "#/components/schemas/BgpConfig" - } - }, - "infra_ip_first": { - "description": "First ip address to be used for configuring network infrastructure", - "type": "string", - "format": "ipv4" - }, - "infra_ip_last": { - "description": "Last ip address to be used for configuring network infrastructure", - "type": "string", - "format": "ipv4" - }, - "ports": { - "description": "Uplinks for connecting the rack to external networks", - "type": "array", - "items": { - "$ref": "#/components/schemas/PortConfigV2" + "converted": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceConverted" } }, - "rack_subnet": { - "$ref": "#/components/schemas/Ipv6Net" - } - }, - "required": [ - "bgp", - "infra_ip_first", - "infra_ip_last", - "ports", - "rack_subnet" - ] - }, - "ReconfiguratorConfig": { - "type": "object", - "properties": { - "planner_config": { - "$ref": "#/components/schemas/PlannerConfig" - }, - "planner_enabled": { + "no_target_release": { "type": "boolean" - } - }, - "required": [ - "planner_config", - "planner_enabled" - ] - }, - "ReconfiguratorConfigParam": { - "type": "object", - "properties": { - "config": { - "$ref": "#/components/schemas/ReconfiguratorConfig" }, - "version": { - "type": "integer", - "format": "uint32", - "minimum": 0 - } - }, - "required": [ - "config", - "version" - ] - }, - "ReconfiguratorConfigView": { - "type": "object", - "properties": { - "config": { - "$ref": "#/components/schemas/ReconfiguratorConfig" + "skipped_sled_host_phase_2": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceSkipSledHostPhase2Reason" + } }, - "time_modified": { - "type": "string", - "format": "date-time" + "skipped_sled_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceSkipSledZonesReason" + } }, - "version": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "skipped_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceSkipZoneReason" + } } }, "required": [ - "config", - "time_modified", - "version" + "converted", + "no_target_release", + "skipped_sled_host_phase_2", + "skipped_sled_zones", + "skipped_zones" ] }, - "RecoverySiloConfig": { + "PlanningOutOfDateZone": { + "description": "We have at least the minimum required number of zones of a given kind.", "type": "object", "properties": { - "silo_name": { - "$ref": "#/components/schemas/Name" - }, - "user_name": { - "$ref": "#/components/schemas/UserId" + "desired_image_source": { + "$ref": "#/components/schemas/BlueprintZoneImageSource" }, - "user_password_hash": { - "$ref": "#/components/schemas/NewPasswordHash" + "zone_config": { + "$ref": "#/components/schemas/BlueprintZoneConfig" } }, "required": [ - "silo_name", - "user_name", - "user_password_hash" + "desired_image_source", + "zone_config" ] }, - "RepairFinishInfo": { + "PlanningZoneUpdatesStepReport": { "type": "object", "properties": { - "aborted": { - "type": "boolean" + "expunged_zones": { + "type": "object", + 
"additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } + } }, - "repair_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" + "out_of_date_zones": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PlanningOutOfDateZone" + } + } }, - "repair_type": { - "$ref": "#/components/schemas/UpstairsRepairType" + "unsafe_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ZoneUnsafeToShutdown" + } }, - "repairs": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DownstairsUnderRepair" + "updated_zones": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } } }, - "session_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" + "waiting_on": { + "nullable": true, + "description": "What are we waiting on to start zone updates?", + "allOf": [ + { + "$ref": "#/components/schemas/ZoneUpdatesWaitingOn" + } + ] }, - "time": { - "type": "string", - "format": "date-time" + "waiting_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ZoneWaitingToExpunge" + } } }, "required": [ - "aborted", - "repair_id", - "repair_type", - "repairs", - "session_id", - "time" + "expunged_zones", + "out_of_date_zones", + "unsafe_zones", + "updated_zones", + "waiting_zones" ] }, - "RepairProgress": { + "PortConfigV2": { "type": "object", "properties": { - "current_item": { - "type": "integer", - "format": "int64" + "addresses": { + "description": "This port's addresses and optional vlan IDs", + "type": "array", + "items": { + "$ref": "#/components/schemas/UplinkAddressConfig" + } }, - "time": { - "type": "string", - "format": "date-time" + "autoneg": { + "description": "Whether or not to set autonegotiation", + "default": false, + "type": "boolean" }, - "total_items": { - "type": "integer", - "format": "int64" - } - }, - "required": [ - "current_item", - "time", - "total_items" - ] - }, - "RepairStartInfo": { - "type": "object", - "properties": { - "repair_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" + "bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } }, - "repair_type": { - "$ref": "#/components/schemas/UpstairsRepairType" + "lldp": { + "nullable": true, + "description": "LLDP configuration for this port", + "allOf": [ + { + "$ref": "#/components/schemas/LldpPortConfig" + } + ] }, - "repairs": { + "port": { + "description": "Nmae of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", "type": "array", "items": { - "$ref": "#/components/schemas/DownstairsUnderRepair" + "$ref": "#/components/schemas/RouteConfig" } }, - "session_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] }, - "time": { - "type": "string", - "format": "date-time" + "tx_eq": { + "nullable": true, + "description": "TX-EQ configuration for this port", + "allOf": [ + { + "$ref": "#/components/schemas/TxEqConfig" + } + ] + }, + "uplink_port_fec": { + "nullable": true, + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/components/schemas/PortFec" 
+ } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/components/schemas/PortSpeed" + } + ] } }, "required": [ - "repair_id", - "repair_type", - "repairs", - "session_id", - "time" + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_speed" + ] + }, + "PortFec": { + "description": "Switchport FEC options", + "type": "string", + "enum": [ + "firecode", + "none", + "rs" + ] + }, + "PortSpeed": { + "description": "Switchport Speed options", + "type": "string", + "enum": [ + "speed0_g", + "speed1_g", + "speed10_g", + "speed25_g", + "speed40_g", + "speed50_g", + "speed100_g", + "speed200_g", + "speed400_g" ] }, - "RotBootloaderStatus": { + "ProbeExternalIp": { "type": "object", "properties": { - "stage0_next_version": { - "$ref": "#/components/schemas/TufRepoVersion" + "first_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/ProbeExternalIpKind" }, - "stage0_version": { - "$ref": "#/components/schemas/TufRepoVersion" + "last_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "stage0_next_version", - "stage0_version" + "first_port", + "ip", + "kind", + "last_port" ] }, - "RotSlot": { - "oneOf": [ - { - "type": "object", - "properties": { - "slot": { - "type": "string", - "enum": [ - "a" - ] - } - }, - "required": [ - "slot" - ] - }, - { - "type": "object", - "properties": { - "slot": { - "type": "string", - "enum": [ - "b" - ] - } - }, - "required": [ - "slot" - ] - } + "ProbeExternalIpKind": { + "type": "string", + "enum": [ + "snat", + "floating", + "ephemeral" ] }, - "RotStatus": { + "ProbeInfo": { "type": "object", "properties": { - "active_slot": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/RotSlot" - } - ] + "external_ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ProbeExternalIp" + } }, - "slot_a_version": { - "$ref": "#/components/schemas/TufRepoVersion" + "id": { + "type": "string", + "format": "uuid" + }, + "interface": { + "$ref": "#/components/schemas/NetworkInterface" + }, + "name": { + "$ref": "#/components/schemas/Name" }, - "slot_b_version": { - "$ref": "#/components/schemas/TufRepoVersion" + "sled": { + "type": "string", + "format": "uuid" } }, "required": [ - "slot_a_version", - "slot_b_version" + "external_ips", + "id", + "interface", + "name", + "sled" ] }, - "RouteConfig": { + "ProducerEndpoint": { + "description": "Information announced by a metric server, used so that clients can contact it and collect available metric data from it.", "type": "object", "properties": { - "destination": { - "description": "The destination of the route.", + "address": { + "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", + "type": "string" + }, + "id": { + "description": "A unique ID for this producer.", + "type": "string", + "format": "uuid" + }, + "interval": { + "description": "The interval on which `oximeter` should collect metrics.", "allOf": [ { - "$ref": "#/components/schemas/IpNet" + "$ref": "#/components/schemas/Duration" } ] }, - "nexthop": { - "description": "The nexthop/gateway address.", - "type": "string", - "format": "ip" - }, - "rib_priority": { - "nullable": true, - "description": "The RIB priority (i.e. 
Admin Distance) associated with this route.", - "default": null, - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "vlan_id": { - "nullable": true, - "description": "The VLAN id associated with this route.", - "default": null, - "type": "integer", - "format": "uint16", - "minimum": 0 + "kind": { + "description": "The kind of producer.", + "allOf": [ + { + "$ref": "#/components/schemas/ProducerKind" + } + ] } }, "required": [ - "destination", - "nexthop" + "address", + "id", + "interval", + "kind" ] }, - "Saga": { - "description": "Sagas\n\nThese are currently only intended for observability by developers. We will eventually want to flesh this out into something more observable for end users.", + "ProducerEndpointResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "id": { - "type": "string", - "format": "uuid" + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProducerEndpoint" + } }, - "state": { - "$ref": "#/components/schemas/SagaState" + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" } }, "required": [ - "id", - "state" + "items" ] }, - "SagaErrorInfo": { + "ProducerKind": { + "description": "The kind of metric producer this is.", "oneOf": [ { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "action_failed" - ] - }, - "source_error": {} - }, - "required": [ - "error", - "source_error" + "description": "The producer is a sled-agent.", + "type": "string", + "enum": [ + "sled_agent" ] }, { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "deserialize_failed" - ] - }, - "message": { - "type": "string" - } - }, - "required": [ - "error", - "message" + "description": "The producer is an Omicron-managed service.", + "type": "string", + "enum": [ + "service" ] }, { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "injected_error" - ] - } - }, - "required": [ - "error" + "description": "The producer is a Propolis VMM managing a guest instance.", + "type": "string", + "enum": [ + "instance" ] }, { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "serialize_failed" - ] - }, - "message": { - "type": "string" - } - }, - "required": [ - "error", - "message" + "description": "The producer is a management gateway service.", + "type": "string", + "enum": [ + "management_gateway" ] - }, - { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "subsaga_create_failed" - ] - }, - "message": { - "type": "string" + } + ] + }, + "ProducerRegistrationResponse": { + "description": "Response to a successful producer registration.", + "type": "object", + "properties": { + "lease_duration": { + "description": "Period within which producers must renew their lease.\n\nProducers are required to periodically re-register with Nexus, to ensure that they are still collected from by `oximeter`.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" } - }, - "required": [ - "error", - "message" ] } + }, + "required": [ + "lease_duration" ] }, - "SagaQuiesceStatus": { + "RackInitializationRequest": { "type": "object", "properties": { - "drained_blueprint_id": { - "nullable": true, - "description": "blueprint id that we're \"fully drained up to\"\n\nIf this value is non-`None`, that means that:\n\n- saga creation is disallowed 
- no sagas are running - we have re-assigned sagas from other Nexus instances expunged in this blueprint or earlier - we have finished recovery for all those sagas (that had been assigned to us as of the re-assignment pass for this blueprint id)\n\nThis means that the only way we can wind up running another saga is if there's a new blueprint that expunges a different Nexus zone.", + "allowed_source_ips": { + "description": "IPs or subnets allowed to make requests to user-facing services", "allOf": [ { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + }, + "blueprint": { + "description": "Blueprint describing services initialized by RSS.", + "allOf": [ + { + "$ref": "#/components/schemas/Blueprint" } ] }, - "first_recovery_complete": { - "description": "whether at least one recovery pass has successfully completed\n\nWe have to track this because we can't quiesce until we know we've recovered all outstanding sagas.", - "type": "boolean" + "certs": { + "description": "x.509 Certificates used to encrypt communication with the external API.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Certificate" + } + }, + "crucible_datasets": { + "description": "Crucible datasets on the rack which have been provisioned by RSS.", + "type": "array", + "items": { + "$ref": "#/components/schemas/CrucibleDatasetCreateRequest" + } + }, + "external_dns_zone_name": { + "description": "delegated DNS name for external DNS", + "type": "string" }, - "new_sagas_allowed": { - "description": "current policy: are we allowed to *create* new sagas?\n\nThis also affects re-assigning sagas from expunged Nexus instances to ourselves. It does **not** affect saga recovery.", + "external_port_count": { + "description": "The external qsfp ports per sidecar", "allOf": [ { - "$ref": "#/components/schemas/SagasAllowed" + "$ref": "#/components/schemas/ExternalPortDiscovery" } ] }, - "reassignment_blueprint_id": { - "nullable": true, - "description": "blueprint id associated with last successful saga reassignment\n\nSimilar to the generation number, this is used to track whether we've accounted for all sagas for all expungements up through this target blueprint.", + "internal_dns_zone_config": { + "description": "initial internal DNS config", "allOf": [ { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + "$ref": "#/components/schemas/DnsConfigParams" } ] }, - "reassignment_generation": { - "description": "generation number for the saga reassignment\n\nThis gets bumped whenever a saga reassignment operation completes that may have re-assigned us some sagas. 
It's used to keep track of when we've recovered all sagas that could be assigned to us.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] + "internal_services_ip_pool_ranges": { + "description": "Ranges of the service IP pool which may be used for internal services, such as Nexus.", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpRange" + } }, - "reassignment_pending": { - "description": "whether there is a saga reassignment operation happening\n\nThese operatinos may assign new sagas to Nexus that must be recovered and completed before quiescing can finish.", - "type": "boolean" + "physical_disks": { + "description": "\"Managed\" physical disks owned by the control plane", + "type": "array", + "items": { + "$ref": "#/components/schemas/PhysicalDiskPutRequest" + } }, - "recovered_blueprint_id": { - "nullable": true, - "description": "blueprint id that saga recovery has \"caught up to\"\n\nThis means that we have finished recovering any sagas that were re-assigned to us due to expungements of other Nexus zones up through this blueprint. Put differently: we know that we will never be assigned more sagas due to expungement unless the target blueprint changes past this one.\n\nThis does not mean that we've fully drained all sagas up through this blueprint. There may still be sagas running.", + "rack_network_config": { + "description": "Initial rack network configuration", "allOf": [ { - "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + "$ref": "#/components/schemas/RackNetworkConfigV2" } ] }, - "recovered_reassignment_generation": { - "description": "\"saga reassignment generation number\" that was \"caught up to\" by the last recovery pass\n\nThis is used with `reassignment_generation` to help us know when we've recovered all the sagas that may have been assigned to us during a given reassignment pass. See `reassignment_done()` for details.", + "recovery_silo": { + "description": "configuration for the initial (recovery) Silo", "allOf": [ { - "$ref": "#/components/schemas/Generation" + "$ref": "#/components/schemas/RecoverySiloConfig" } ] }, - "recovery_pending": { - "nullable": true, - "description": "If a recovery pass is ongoing, a snapshot of reassignment state when it started (which reflects what we'll be caught up to when it finishes)", - "allOf": [ - { - "$ref": "#/components/schemas/PendingRecovery" - } - ] + "zpools": { + "description": "Zpools created within the physical disks created by the control plane.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ZpoolPutRequest" + } + } + }, + "required": [ + "allowed_source_ips", + "blueprint", + "certs", + "crucible_datasets", + "external_dns_zone_name", + "external_port_count", + "internal_dns_zone_config", + "internal_services_ip_pool_ranges", + "physical_disks", + "rack_network_config", + "recovery_silo", + "zpools" + ] + }, + "RackNetworkConfigV2": { + "description": "Initial network configuration", + "type": "object", + "properties": { + "bfd": { + "description": "BFD configuration for connecting the rack to external networks", + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/BfdPeerConfig" + } }, - "sagas_pending": { - "title": "IdOrdMap", - "description": "list of sagas we need to wait to complete before quiescing\n\nThese are basically running sagas. 
They may have been created in this Nexus process lifetime or created in another process and then recovered in this one.", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/PendingSagaInfo" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", "type": "array", "items": { - "$ref": "#/components/schemas/PendingSagaInfo" - }, - "uniqueItems": true + "$ref": "#/components/schemas/BgpConfig" + } + }, + "infra_ip_first": { + "description": "First ip address to be used for configuring network infrastructure", + "type": "string", + "format": "ipv4" + }, + "infra_ip_last": { + "description": "Last ip address to be used for configuring network infrastructure", + "type": "string", + "format": "ipv4" + }, + "ports": { + "description": "Uplinks for connecting the rack to external networks", + "type": "array", + "items": { + "$ref": "#/components/schemas/PortConfigV2" + } + }, + "rack_subnet": { + "$ref": "#/components/schemas/Ipv6Net" } }, "required": [ - "first_recovery_complete", - "new_sagas_allowed", - "reassignment_generation", - "reassignment_pending", - "recovered_reassignment_generation", - "sagas_pending" + "bgp", + "infra_ip_first", + "infra_ip_last", + "ports", + "rack_subnet" ] }, - "SagaResultsPage": { - "description": "A single page of results", + "RecoverySiloConfig": { "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", + "silo_name": { + "$ref": "#/components/schemas/Name" + }, + "user_name": { + "$ref": "#/components/schemas/UserId" + }, + "user_password_hash": { + "$ref": "#/components/schemas/NewPasswordHash" + } + }, + "required": [ + "silo_name", + "user_name", + "user_password_hash" + ] + }, + "RepairFinishInfo": { + "type": "object", + "properties": { + "aborted": { + "type": "boolean" + }, + "repair_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" + }, + "repair_type": { + "$ref": "#/components/schemas/UpstairsRepairType" + }, + "repairs": { "type": "array", "items": { - "$ref": "#/components/schemas/Saga" + "$ref": "#/components/schemas/DownstairsUnderRepair" } }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + "session_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" + }, + "time": { + "type": "string", + "format": "date-time" } }, "required": [ - "items" + "aborted", + "repair_id", + "repair_type", + "repairs", + "session_id", + "time" ] }, - "SagaState": { - "oneOf": [ - { - "description": "Saga is currently executing", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "running" - ] - } - }, - "required": [ - "state" - ] + "RepairProgress": { + "type": "object", + "properties": { + "current_item": { + "type": "integer", + "format": "int64" }, - { - "description": "Saga completed successfully", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "succeeded" - ] - } - }, - "required": [ - "state" - ] + "time": { + "type": "string", + "format": "date-time" + }, + "total_items": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "current_item", + "time", + "total_items" + ] + }, + "RepairStartInfo": { + "type": "object", + "properties": { + "repair_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" }, + "repair_type": { + "$ref": 
"#/components/schemas/UpstairsRepairType" + }, + "repairs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DownstairsUnderRepair" + } + }, + "session_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" + }, + "time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "repair_id", + "repair_type", + "repairs", + "session_id", + "time" + ] + }, + "RotSlot": { + "oneOf": [ { - "description": "One or more saga actions failed and the saga was successfully unwound (i.e., undo actions were executed for any actions that were completed). The saga is no longer running.", - "type": "object", - "properties": { - "error_info": { - "$ref": "#/components/schemas/SagaErrorInfo" - }, - "error_node_name": { - "$ref": "#/components/schemas/NodeName" - }, - "state": { + "type": "object", + "properties": { + "slot": { "type": "string", "enum": [ - "failed" + "a" ] } }, "required": [ - "error_info", - "error_node_name", - "state" + "slot" ] }, { - "description": "One or more saga actions failed, *and* one or more undo actions failed during unwinding. State managed by the saga may now be inconsistent. Support may be required to repair the state. The saga is no longer running.", "type": "object", "properties": { - "error_info": { - "$ref": "#/components/schemas/SagaErrorInfo" - }, - "error_node_name": { - "$ref": "#/components/schemas/NodeName" - }, - "state": { + "slot": { "type": "string", "enum": [ - "stuck" + "b" ] - }, - "undo_error_node_name": { - "$ref": "#/components/schemas/NodeName" - }, - "undo_source_error": {} + } }, "required": [ - "error_info", - "error_node_name", - "state", - "undo_error_node_name", - "undo_source_error" + "slot" ] } ] }, - "SagasAllowed": { - "description": "Policy determining whether new sagas are allowed to be started\n\nThis is used by Nexus quiesce to disallow creation of new sagas when we're trying to quiesce Nexus.", - "oneOf": [ - { - "description": "New sagas may be started (normal condition)", - "type": "string", - "enum": [ - "allowed" + "RouteConfig": { + "type": "object", + "properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } ] }, - { - "description": "New sagas may not be started because we're quiescing or quiesced", + "nexthop": { + "description": "The nexthop/gateway address.", "type": "string", - "enum": [ - "disallowed_quiesce" - ] + "format": "ip" }, - { - "description": "New sagas may not be started because we just started up and haven't determined if we're quiescing yet", - "type": "string", - "enum": [ - "disallowed_unknown" - ] + "rib_priority": { + "nullable": true, + "description": "The RIB priority (i.e. 
Admin Distance) associated with this route.", + "default": null, + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN id associated with this route.", + "default": null, + "type": "integer", + "format": "uint16", + "minimum": 0 } + }, + "required": [ + "destination", + "nexthop" ] }, "ServerId": { @@ -8941,40 +5479,6 @@ "usable_physical_ram" ] }, - "SledAgentUpdateStatus": { - "type": "object", - "properties": { - "host_phase_2": { - "$ref": "#/components/schemas/HostPhase2Status" - }, - "sled_id": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - }, - "zones": { - "title": "IdOrdMap", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/ZoneStatus" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, - "type": "array", - "items": { - "$ref": "#/components/schemas/ZoneStatus" - }, - "uniqueItems": true - } - }, - "required": [ - "host_phase_2", - "sled_id", - "zones" - ] - }, "SledCpuFamily": { "description": "Identifies the kind of CPU present on a sled, determined by reading CPUID.\n\nThis is intended to broadly support the control plane answering the question \"can I run this instance on that sled?\" given an instance with either no or some CPU platform requirement. It is not enough information for more precise placement questions - for example, is a CPU a high-frequency part or many-core part? We don't include Genoa here, but in that CPU family there are high frequency parts, many-core parts, and large-cache parts. To support those questions (or satisfactorily answer #8730) we would need to collect additional information and send it along.", "oneOf": [ @@ -9008,80 +5512,6 @@ } ] }, - "SledId": { - "type": "object", - "properties": { - "id": { - "$ref": "#/components/schemas/TypedUuidForSledKind" - } - }, - "required": [ - "id" - ] - }, - "SledPolicy": { - "description": "The operator-defined policy of a sled.", - "oneOf": [ - { - "description": "The operator has indicated that the sled is in-service.", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": [ - "in_service" - ] - }, - "provision_policy": { - "description": "Determines whether new resources can be provisioned onto the sled.", - "allOf": [ - { - "$ref": "#/components/schemas/SledProvisionPolicy" - } - ] - } - }, - "required": [ - "kind", - "provision_policy" - ] - }, - { - "description": "The operator has indicated that the sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is expunged, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new sled.)\n\nAn expunged sled is always non-provisionable.", - "type": "object", - "properties": { - "kind": { - "type": "string", - "enum": [ - "expunged" - ] - } - }, - "required": [ - "kind" - ] - } - ] - }, - "SledProvisionPolicy": { - "description": "The operator-defined provision policy of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", - "oneOf": [ - { - "description": "New resources will be provisioned on this sled.", - "type": "string", - "enum": [ - "provisionable" - ] - }, - { - "description": "New resources will not be provisioned on this sled. 
However, if the sled is currently in service, existing resources will continue to be on this sled unless manually migrated off.", - "type": "string", - "enum": [ - "non_provisionable" - ] - } - ] - }, "SledRole": { "description": "Describes the role of the sled within the rack.\n\nNote that this may change if the sled is physically moved within the rack.", "oneOf": [ @@ -9101,19 +5531,6 @@ } ] }, - "SledSelector": { - "type": "object", - "properties": { - "sled": { - "description": "ID of the sled", - "type": "string", - "format": "uuid" - } - }, - "required": [ - "sled" - ] - }, "SledState": { "description": "The current state of the sled.", "oneOf": [ @@ -9191,169 +5608,48 @@ } }, "required": [ - "first_port", - "ip", - "last_port" - ] - }, - "SpStatus": { - "type": "object", - "properties": { - "slot0_version": { - "$ref": "#/components/schemas/TufRepoVersion" - }, - "slot1_version": { - "$ref": "#/components/schemas/TufRepoVersion" - } - }, - "required": [ - "slot0_version", - "slot1_version" - ] - }, - "SpType": { - "description": "`SpType`\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"sled\", \"power\", \"switch\" ] } ```
", - "type": "string", - "enum": [ - "sled", - "power", - "switch" - ] - }, - "Srv": { - "type": "object", - "properties": { - "port": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "prio": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "target": { - "type": "string" - }, - "weight": { - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "port", - "prio", - "target", - "weight" - ] - }, - "SupportBundleCreate": { - "type": "object", - "properties": { - "user_comment": { - "nullable": true, - "description": "User comment for the support bundle", - "type": "string" - } - } - }, - "SupportBundleInfo": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "reason_for_creation": { - "type": "string" - }, - "reason_for_failure": { - "nullable": true, - "type": "string" - }, - "state": { - "$ref": "#/components/schemas/SupportBundleState" - }, - "time_created": { - "type": "string", - "format": "date-time" - }, - "user_comment": { - "nullable": true, - "type": "string" - } - }, - "required": [ - "id", - "reason_for_creation", - "state", - "time_created" - ] - }, - "SupportBundleInfoResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/SupportBundleInfo" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" + "first_port", + "ip", + "last_port" ] }, - "SupportBundleState": { - "oneOf": [ - { - "description": "Support Bundle still actively being collected.\n\nThis is the initial state for a Support Bundle, and it will automatically transition to either \"Failing\" or \"Active\".\n\nIf a user no longer wants to access a Support Bundle, they can request cancellation, which will transition to the \"Destroying\" state.", - "type": "string", - "enum": [ - "collecting" - ] - }, - { - "description": "Support Bundle is being destroyed.\n\nOnce backing storage has been freed, this bundle is destroyed.", - "type": "string", - "enum": [ - "destroying" - ] - }, - { - "description": "Support Bundle was not created successfully, or was created and has lost backing storage.\n\nThe record of the bundle still exists for readability, but the only valid operation on these bundles is to destroy them.", - "type": "string", - "enum": [ - "failed" - ] - }, - { - "description": "Support Bundle has been processed, and is ready for usage.", - "type": "string", - "enum": [ - "active" - ] - } + "SpType": { + "description": "`SpType`\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"sled\", \"power\", \"switch\" ] } ```
", + "type": "string", + "enum": [ + "sled", + "power", + "switch" ] }, - "SupportBundleUpdate": { + "Srv": { "type": "object", "properties": { - "user_comment": { - "nullable": true, - "description": "User comment for the support bundle", + "port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "prio": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "target": { "type": "string" + }, + "weight": { + "type": "integer", + "format": "uint16", + "minimum": 0 } - } + }, + "required": [ + "port", + "prio", + "target", + "weight" + ] }, "SwitchLocation": { "description": "Identifies switch physical location", @@ -9393,75 +5689,6 @@ "SwitchPutResponse": { "type": "object" }, - "TufRepoVersion": { - "oneOf": [ - { - "type": "object", - "properties": { - "zone_status_version": { - "type": "string", - "enum": [ - "unknown" - ] - } - }, - "required": [ - "zone_status_version" - ] - }, - { - "type": "object", - "properties": { - "zone_status_version": { - "type": "string", - "enum": [ - "install_dataset" - ] - } - }, - "required": [ - "zone_status_version" - ] - }, - { - "type": "object", - "properties": { - "details": { - "type": "string", - "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" - }, - "zone_status_version": { - "type": "string", - "enum": [ - "version" - ] - } - }, - "required": [ - "details", - "zone_status_version" - ] - }, - { - "type": "object", - "properties": { - "details": { - "type": "string" - }, - "zone_status_version": { - "type": "string", - "enum": [ - "error" - ] - } - }, - "required": [ - "details", - "zone_status_version" - ] - } - ] - }, "TxEqConfig": { "description": "Per-port tx-eq overrides. 
This can be used to fine-tune the transceiver equalization settings to improve signal integrity.", "type": "object", @@ -9506,10 +5733,6 @@ "type": "string", "format": "uuid" }, - "TypedUuidForDemoSagaKind": { - "type": "string", - "format": "uuid" - }, "TypedUuidForDownstairsRegionKind": { "type": "string", "format": "uuid" @@ -9546,134 +5769,6 @@ "type": "string", "format": "uuid" }, - "UninitializedSled": { - "description": "A sled that has not been added to an initialized rack yet", - "type": "object", - "properties": { - "baseboard": { - "$ref": "#/components/schemas/Baseboard" - }, - "cubby": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "rack_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "baseboard", - "cubby", - "rack_id" - ] - }, - "UninitializedSledId": { - "description": "The unique hardware ID for a sled", - "type": "object", - "properties": { - "part": { - "type": "string" - }, - "serial": { - "type": "string" - } - }, - "required": [ - "part", - "serial" - ] - }, - "UninitializedSledResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/UninitializedSled" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] - }, - "UpdateAttemptStatus": { - "description": "status of a single update attempt", - "type": "string", - "enum": [ - "not_started", - "fetching_artifact", - "precheck", - "updating", - "update_waiting", - "post_update", - "post_update_wait", - "done" - ] - }, - "UpdateCompletedHow": { - "type": "string", - "enum": [ - "found_no_changes_needed", - "completed_update", - "waited_for_concurrent_update", - "took_over_concurrent_update" - ] - }, - "UpdateStatus": { - "type": "object", - "properties": { - "mgs_driven": { - "title": "IdOrdMap", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/MgsDrivenUpdateStatus" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, - "type": "array", - "items": { - "$ref": "#/components/schemas/MgsDrivenUpdateStatus" - }, - "uniqueItems": true - }, - "sleds": { - "title": "IdOrdMap", - "x-rust-type": { - "crate": "iddqd", - "parameters": [ - { - "$ref": "#/components/schemas/SledAgentUpdateStatus" - } - ], - "path": "iddqd::IdOrdMap", - "version": "*" - }, - "type": "array", - "items": { - "$ref": "#/components/schemas/SledAgentUpdateStatus" - }, - "uniqueItems": true - } - }, - "required": [ - "mgs_driven", - "sleds" - ] - }, "UplinkAddressConfig": { "type": "object", "properties": { @@ -9807,29 +5902,6 @@ "format": "uint32", "minimum": 0 }, - "WaitingStatus": { - "description": "externally-exposed status for waiting updates", - "type": "object", - "properties": { - "baseboard_id": { - "$ref": "#/components/schemas/BaseboardId" - }, - "nattempts_done": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "next_attempt_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "baseboard_id", - "nattempts_done", - "next_attempt_time" - ] - }, "ZoneAddWaitingOn": { "oneOf": [ { @@ -9849,25 +5921,6 @@ } ] }, - "ZoneStatus": { - "type": "object", - "properties": { - "version": { - "$ref": "#/components/schemas/TufRepoVersion" - }, - "zone_id": { - "$ref": "#/components/schemas/TypedUuidForOmicronZoneKind" - }, - 
"zone_type": { - "$ref": "#/components/schemas/OmicronZoneType" - } - }, - "required": [ - "version", - "zone_id", - "zone_type" - ] - }, "ZoneUnsafeToShutdown": { "description": "Zones which should not be shut down, because their lack of availability could be problematic for the successful functioning of the deployed system.", "oneOf": [ @@ -10063,25 +6116,6 @@ } ] }, - "TimeAndIdSortMode": { - "description": "Supported set of sort modes for scanning by timestamp and ID", - "oneOf": [ - { - "description": "sort in increasing order of timestamp and ID, i.e., earliest first", - "type": "string", - "enum": [ - "time_and_id_ascending" - ] - }, - { - "description": "sort in increasing order of timestamp and ID, i.e., most recent first", - "type": "string", - "enum": [ - "time_and_id_descending" - ] - } - ] - }, "TypedUuidForPropolisKind": { "type": "string", "format": "uuid" diff --git a/openapi/nexus-lockstep.json b/openapi/nexus-lockstep.json index 5acf17a057f..8dbc8b3c484 100644 --- a/openapi/nexus-lockstep.json +++ b/openapi/nexus-lockstep.json @@ -10,74 +10,7178 @@ "version": "0.0.1" }, "paths": { - "/v1/ping": { + "/bgtasks": { "get": { - "summary": "Ping API", - "description": "Always responds with Ok if it responds at all.", - "operationId": "ping", + "summary": "List background tasks", + "description": "This is a list of discrete background activities that Nexus carries out. This is exposed for support and debugging.", + "operationId": "bgtask_list", "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Ping" + "title": "Map_of_BackgroundTask", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BackgroundTask" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/bgtasks/activate": { + "post": { + "summary": "Activates one or more background tasks, causing them to be run immediately", + "description": "if idle, or scheduled to run again as soon as possible if already running.", + "operationId": "bgtask_activate", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BackgroundTasksActivateRequest" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/bgtasks/view/{bgtask_name}": { + "get": { + "summary": "Fetch status of one background task", + "description": "This is exposed for support and debugging.", + "operationId": "bgtask_view", + "parameters": [ + { + "in": "path", + "name": "bgtask_name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BackgroundTask" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/clickhouse/policy": { + "get": { + "summary": "Get the current clickhouse policy", + "operationId": "clickhouse_policy_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ClickhousePolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { 
+ "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "summary": "Set the new clickhouse policy", + "operationId": "clickhouse_policy_set", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ClickhousePolicy" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/demo-saga": { + "post": { + "summary": "Kick off an instance of the \"demo\" saga", + "description": "This saga is used for demo and testing. The saga just waits until you complete using the `saga_demo_complete` API.", + "operationId": "saga_demo_create", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DemoSaga" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/demo-saga/{demo_saga_id}/complete": { + "post": { + "summary": "Complete a waiting demo saga", + "description": "Note that the id used here is not the same as the id of the saga. It's the one returned by the `saga_demo_create` API.", + "operationId": "saga_demo_complete", + "parameters": [ + { + "in": "path", + "name": "demo_saga_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForDemoSagaKind" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/all": { + "get": { + "summary": "Lists blueprints", + "operationId": "blueprint_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintMetadataResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/deployment/blueprints/all/{blueprint_id}": { + "get": { + "summary": "Fetches one blueprint", + "operationId": "blueprint_view", + "parameters": [ + { + "in": "path", + "name": "blueprint_id", + "description": "ID of the blueprint", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Blueprint" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "summary": "Deletes one blueprint", + "operationId": "blueprint_delete", + "parameters": [ + { + "in": "path", + "name": "blueprint_id", + "description": "ID of the 
blueprint", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/import": { + "post": { + "summary": "Imports a client-provided blueprint", + "description": "This is intended for development and support, not end users or operators.", + "operationId": "blueprint_import", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Blueprint" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/regenerate": { + "post": { + "summary": "Generates a new blueprint for the current system, re-evaluating anything", + "description": "that's changed since the last one was generated", + "operationId": "blueprint_regenerate", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Blueprint" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/target": { + "get": { + "summary": "Fetches the current target blueprint, if any", + "operationId": "blueprint_target_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTarget" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "summary": "Make the specified blueprint the new target", + "operationId": "blueprint_target_set", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTargetSet" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTarget" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/blueprints/target/enabled": { + "put": { + "summary": "Set the `enabled` field of the current target blueprint", + "operationId": "blueprint_target_set_enabled", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTargetSet" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlueprintTarget" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/reconfigurator-config": { + "get": { + "summary": "Get the current reconfigurator configuration", + "operationId": "reconfigurator_config_show_current", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReconfiguratorConfigView" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + 
}, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "summary": "Update the reconfigurator config at the latest versions", + "operationId": "reconfigurator_config_set", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReconfiguratorConfigParam" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/reconfigurator-config/{version}": { + "get": { + "summary": "Get the reconfigurator config at `version` if it exists", + "operationId": "reconfigurator_config_show", + "parameters": [ + { + "in": "path", + "name": "version", + "required": true, + "schema": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReconfiguratorConfigView" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/deployment/update-status": { + "get": { + "summary": "Show deployed versions of artifacts", + "operationId": "update_status", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateStatus" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/system/support-bundles": { + "get": { + "summary": "List all support bundles", + "operationId": "support_bundle_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/TimeAndIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfoResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "summary": "Create a new support bundle", + "operationId": "support_bundle_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/system/support-bundles/{bundle_id}": { + "get": { + "summary": "View a support bundle", + "operationId": "support_bundle_view", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + 
"required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "summary": "Update a support bundle", + "operationId": "support_bundle_update", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "summary": "Delete an existing support bundle", + "description": "May also be used to cancel a support bundle which is currently being collected, or to remove metadata for a support bundle that has failed.", + "operationId": "support_bundle_delete", + "parameters": [ + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/experimental/v1/system/support-bundles/{bundle_id}/download": { + "get": { + "summary": "Download the contents of a support bundle", + "operationId": "support_bundle_download", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "head": { + "summary": "Download the metadata of a support bundle", + "operationId": "support_bundle_head", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/experimental/v1/system/support-bundles/{bundle_id}/download/{file}": { + "get": { + "summary": "Download a file within a support bundle", + "operationId": "support_bundle_download_file", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support 
bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "file", + "description": "The file within the bundle to download", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + }, + "head": { + "summary": "Download the metadata of a file within the support bundle", + "operationId": "support_bundle_head_file", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "path", + "name": "file", + "description": "The file within the bundle to download", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/experimental/v1/system/support-bundles/{bundle_id}/index": { + "get": { + "summary": "Download the index of a support bundle", + "operationId": "support_bundle_index", + "parameters": [ + { + "in": "header", + "name": "range", + "description": "A request to access a portion of the resource, such as `bytes=0-499`\n\nSee: ", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "bundle_id", + "description": "ID of the support bundle", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + } + } + }, + "/instances/{instance_id}/migrate": { + "post": { + "operationId": "instance_migrate", + "parameters": [ + { + "in": "path", + "name": "instance_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/TypedUuidForInstanceKind" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceMigrateRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Instance" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/mgs-updates": { + "get": { + "summary": "Fetch information about ongoing MGS updates", + "operationId": "mgs_updates", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MgsUpdateDriverStatus" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/oximeter/read-policy": { + "get": { + "summary": "Get the current oximeter read policy", + "operationId": "oximeter_read_policy_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OximeterReadPolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "summary": "Set the new oximeter read policy", + "operationId": "oximeter_read_policy_set", + 
"requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OximeterReadPolicy" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/physical-disk/expunge": { + "post": { + "summary": "Mark a physical disk as expunged", + "description": "This is an irreversible process! It should only be called after sufficient warning to the operator.\n\nThis is idempotent.", + "operationId": "physical_disk_expunge", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDiskPath" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/quiesce": { + "get": { + "summary": "Check whether Nexus is running normally, quiescing, or fully quiesced.", + "operationId": "quiesce_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QuiesceStatus" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "summary": "Begin quiescing this Nexus instance", + "description": "This causes no new sagas to be started and eventually causes no database connections to become available. This is a one-way trip. There's no unquiescing Nexus.", + "operationId": "quiesce_start", + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sagas": { + "get": { + "summary": "List sagas", + "operationId": "saga_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SagaResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/sagas/{saga_id}": { + "get": { + "summary": "Fetch a saga", + "operationId": "saga_view", + "parameters": [ + { + "in": "path", + "name": "saga_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Saga" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sleds/add": { + "post": { + "summary": "Add sled to initialized rack", + "operationId": "sled_add", + "requestBody": 
{ + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSledId" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledId" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sleds/expunge": { + "post": { + "summary": "Mark a sled as expunged", + "description": "This is an irreversible process! It should only be called after sufficient warning to the operator.\n\nThis is idempotent, and it returns the old policy of the sled.", + "operationId": "sled_expunge", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledSelector" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledPolicy" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/sleds/uninitialized": { + "get": { + "summary": "List uninitialized sleds", + "operationId": "sled_list_uninitialized", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSledResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/ping": { + "get": { + "summary": "Ping API", + "description": "Always responds with Ok if it responds at all.", + "operationId": "ping", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ping" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + } + }, + "components": { + "schemas": { + "ActivationReason": { + "description": "Describes why a background task was activated\n\nThis is only used for debugging. This is deliberately not made available to the background task itself. See \"Design notes\" in the module-level documentation for details.", + "type": "string", + "enum": [ + "signaled", + "timeout", + "dependency" + ] + }, + "ArtifactVersion": { + "description": "An artifact version.\n\nThis is a freeform identifier with some basic validation. It may be the serialized form of a semver version, or a custom identifier that uses the same character set as a semver, plus `_`.\n\nThe exact pattern accepted is `^[a-zA-Z0-9._+-]{1,63}$`.\n\n# Ord implementation\n\n`ArtifactVersion`s are not intended to be sorted, just compared for equality. `ArtifactVersion` implements `Ord` only for storage within sorted collections.", + "type": "string", + "pattern": "^[a-zA-Z0-9._+-]{1,63}$" + }, + "BackgroundTask": { + "description": "Background tasks\n\nThese are currently only intended for observability by developers. 
We will eventually want to flesh this out into something more observable for end users.", + "type": "object", + "properties": { + "current": { + "description": "Describes the current task status", + "allOf": [ + { + "$ref": "#/components/schemas/CurrentStatus" + } + ] + }, + "description": { + "description": "brief summary (for developers) of what this task does", + "type": "string" + }, + "last": { + "description": "Describes the last completed activation", + "allOf": [ + { + "$ref": "#/components/schemas/LastResult" + } + ] + }, + "name": { + "description": "unique identifier for this background task", + "type": "string" + }, + "period": { + "description": "how long after an activation completes before another will be triggered automatically\n\n(activations can also be triggered for other reasons)", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + } + }, + "required": [ + "current", + "description", + "last", + "name", + "period" + ] + }, + "BackgroundTasksActivateRequest": { + "description": "Query parameters for Background Task activation requests.", + "type": "object", + "properties": { + "bgtask_names": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "required": [ + "bgtask_names" + ] + }, + "Baseboard": { + "description": "Properties that uniquely identify an Oxide hardware component", + "type": "object", + "properties": { + "part": { + "type": "string" + }, + "revision": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "serial": { + "type": "string" + } + }, + "required": [ + "part", + "revision", + "serial" + ] + }, + "BaseboardId": { + "description": "A unique baseboard id found during a collection\n\nBaseboard ids are the keys used to link up information from disparate sources (like a service processor and a sled agent).\n\nThese are normalized in the database. Each distinct baseboard id is assigned a uuid and shared across the many possible collections that reference it.\n\nUsually, the part number and serial number are combined with a revision number. We do not include that here. If we ever did find a baseboard with the same part number and serial number but a new revision number, we'd want to treat that as the same baseboard as one with a different revision number.", + "type": "object", + "properties": { + "part_number": { + "description": "Oxide Part Number", + "type": "string" + }, + "serial_number": { + "description": "Serial number (unique for a given part number)", + "type": "string" + } + }, + "required": [ + "part_number", + "serial_number" + ] + }, + "Blueprint": { + "description": "Describes a complete set of software and configuration for the system", + "type": "object", + "properties": { + "clickhouse_cluster_config": { + "nullable": true, + "description": "Allocation of Clickhouse Servers and Keepers for replicated clickhouse setups. 
This is set to `None` if replicated clickhouse is not in use.", + "allOf": [ + { + "$ref": "#/components/schemas/ClickhouseClusterConfig" + } + ] + }, + "cockroachdb_fingerprint": { + "description": "CockroachDB state fingerprint when this blueprint was created", + "type": "string" + }, + "cockroachdb_setting_preserve_downgrade": { + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to", + "allOf": [ + { + "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" + } + ] + }, + "comment": { + "description": "human-readable string describing why this blueprint was created (for debugging)", + "type": "string" + }, + "creator": { + "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", + "type": "string" + }, + "external_dns_version": { + "description": "external DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "id": { + "description": "unique identifier for this blueprint", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "internal_dns_version": { + "description": "internal DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "nexus_generation": { + "description": "The generation of the active group of Nexuses\n\nIf a Nexus instance notices it has a nexus_generation less than this value, it will start to quiesce in preparation for handing off control to the newer generation (see: RFD 588).", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "oximeter_read_mode": { + "description": "Whether oximeter should read from a single node or a cluster", + "allOf": [ + { + "$ref": "#/components/schemas/OximeterReadMode" + } + ] + }, + "oximeter_read_version": { + "description": "Oximeter read policy version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "parent_blueprint_id": { + "nullable": true, + "description": "which blueprint this blueprint is based on", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "pending_mgs_updates": { + "description": "List of pending MGS-mediated updates", + "allOf": [ + { + "$ref": "#/components/schemas/PendingMgsUpdates" + } + ] + }, + "sleds": { + "description": "A map of sled id -> desired configuration of the sled.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintSledConfig" + } + }, + "source": { + "description": "Source of this blueprint (can include planning report)", + "allOf": [ + { + "$ref": "#/components/schemas/BlueprintSource" + } + ] + }, + "target_release_minimum_generation": { + "description": "The minimum release generation to accept for target release configuration. Target release configuration with a generation less than this number will be ignored.\n\nFor example, let's say that the current target release generation is 5. Then, when reconfigurator detects a MUPdate:\n\n* the target release is ignored in favor of the install dataset * this field is set to 6\n\nOnce an operator sets a new target release, its generation will be 6 or higher. 
Reconfigurator will then know that it is back in charge of driving the system to the target release.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "time_created": { + "description": "when this blueprint was generated (for debugging)", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "cockroachdb_fingerprint", + "cockroachdb_setting_preserve_downgrade", + "comment", + "creator", + "external_dns_version", + "id", + "internal_dns_version", + "nexus_generation", + "oximeter_read_mode", + "oximeter_read_version", + "pending_mgs_updates", + "sleds", + "source", + "target_release_minimum_generation", + "time_created" + ] + }, + "BlueprintArtifactVersion": { + "description": "The version of an artifact in a blueprint.\n\nThis is used for debugging output.", + "oneOf": [ + { + "description": "A specific version of the image is available.", + "type": "object", + "properties": { + "artifact_version": { + "type": "string", + "enum": [ + "available" + ] + }, + "version": { + "$ref": "#/components/schemas/ArtifactVersion" + } + }, + "required": [ + "artifact_version", + "version" + ] + }, + { + "description": "The version could not be determined. This is non-fatal.", + "type": "object", + "properties": { + "artifact_version": { + "type": "string", + "enum": [ + "unknown" + ] + } + }, + "required": [ + "artifact_version" + ] + } + ] + }, + "BlueprintDatasetConfig": { + "description": "Information about a dataset as recorded in a blueprint", + "type": "object", + "properties": { + "address": { + "nullable": true, + "type": "string" + }, + "compression": { + "$ref": "#/components/schemas/CompressionAlgorithm" + }, + "disposition": { + "$ref": "#/components/schemas/BlueprintDatasetDisposition" + }, + "id": { + "$ref": "#/components/schemas/TypedUuidForDatasetKind" + }, + "kind": { + "$ref": "#/components/schemas/DatasetKind" + }, + "pool": { + "$ref": "#/components/schemas/ZpoolName" + }, + "quota": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "reservation": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + } + }, + "required": [ + "compression", + "disposition", + "id", + "kind", + "pool" + ] + }, + "BlueprintDatasetDisposition": { + "description": "The desired state of an Omicron-managed dataset in a blueprint.\n\nPart of [`BlueprintDatasetConfig`].", + "oneOf": [ + { + "description": "The dataset is in-service.", + "type": "string", + "enum": [ + "in_service" + ] + }, + { + "description": "The dataset is permanently gone.", + "type": "string", + "enum": [ + "expunged" + ] + } + ] + }, + "BlueprintHostPhase2DesiredContents": { + "description": "Describes the desired contents of a host phase 2 slot (i.e., the boot partition on one of the internal M.2 drives).\n\nThis is the blueprint version of [`HostPhase2DesiredContents`].", + "oneOf": [ + { + "description": "Do not change the current contents.\n\nWe use this value when we've detected a sled has been mupdated (and we don't want to overwrite phase 2 images until we understand how to recover from that mupdate) and as the default value when reading a blueprint that was ledgered before this concept existed.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "current_contents" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Set the phase 2 slot to the given artifact.\n\nThe artifact will come from an unpacked and distributed TUF repo.", + "type": "object", + 
"properties": { + "hash": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "type": { + "type": "string", + "enum": [ + "artifact" + ] + }, + "version": { + "$ref": "#/components/schemas/BlueprintArtifactVersion" + } + }, + "required": [ + "hash", + "type", + "version" + ] + } + ] + }, + "BlueprintHostPhase2DesiredSlots": { + "description": "Describes the desired contents for both host phase 2 slots.\n\nThis is the blueprint version of [`HostPhase2DesiredSlots`].", + "type": "object", + "properties": { + "slot_a": { + "$ref": "#/components/schemas/BlueprintHostPhase2DesiredContents" + }, + "slot_b": { + "$ref": "#/components/schemas/BlueprintHostPhase2DesiredContents" + } + }, + "required": [ + "slot_a", + "slot_b" + ] + }, + "BlueprintMetadata": { + "description": "Describe high-level metadata about a blueprint", + "type": "object", + "properties": { + "cockroachdb_fingerprint": { + "description": "CockroachDB state fingerprint when this blueprint was created", + "type": "string" + }, + "cockroachdb_setting_preserve_downgrade": { + "nullable": true, + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to (`None` if this value was retrieved from the database and was invalid)", + "allOf": [ + { + "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" + } + ] + }, + "comment": { + "description": "human-readable string describing why this blueprint was created (for debugging)", + "type": "string" + }, + "creator": { + "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", + "type": "string" + }, + "external_dns_version": { + "description": "external DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "id": { + "description": "unique identifier for this blueprint", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "internal_dns_version": { + "description": "internal DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "nexus_generation": { + "description": "The Nexus generation number\n\nSee [`Blueprint::nexus_generation`].", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "parent_blueprint_id": { + "nullable": true, + "description": "which blueprint this blueprint is based on", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "source": { + "description": "source of the blueprint (for debugging)", + "allOf": [ + { + "$ref": "#/components/schemas/BlueprintSource" + } + ] + }, + "target_release_minimum_generation": { + "description": "The minimum generation for the target release.\n\nSee [`Blueprint::target_release_minimum_generation`].", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "time_created": { + "description": "when this blueprint was generated (for debugging)", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "cockroachdb_fingerprint", + "comment", + "creator", + "external_dns_version", + "id", + "internal_dns_version", + "nexus_generation", + "source", + "target_release_minimum_generation", + "time_created" + ] + }, + "BlueprintMetadataResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": 
"#/components/schemas/BlueprintMetadata" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "BlueprintPhysicalDiskConfig": { + "description": "Information about an Omicron physical disk as recorded in a bluerprint.", + "type": "object", + "properties": { + "disposition": { + "$ref": "#/components/schemas/BlueprintPhysicalDiskDisposition" + }, + "id": { + "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" + }, + "identity": { + "$ref": "#/components/schemas/DiskIdentity" + }, + "pool_id": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + }, + "required": [ + "disposition", + "id", + "identity", + "pool_id" + ] + }, + "BlueprintPhysicalDiskDisposition": { + "description": "The desired state of an Omicron-managed physical disk in a blueprint.", + "oneOf": [ + { + "description": "The physical disk is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "description": "The physical disk is permanently gone.", + "type": "object", + "properties": { + "as_of_generation": { + "description": "Generation of the parent config in which this disk became expunged.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + }, + "ready_for_cleanup": { + "description": "True if Reconfiguration knows that this disk has been expunged.\n\nIn the current implementation, this means either:\n\na) the sled where the disk was residing has been expunged.\n\nb) the planner has observed an inventory collection where the disk expungement was seen by the sled agent on the sled where the disk was previously in service. This is indicated by the inventory reporting a disk generation at least as high as `as_of_generation`.", + "type": "boolean" + } + }, + "required": [ + "as_of_generation", + "kind", + "ready_for_cleanup" + ] + } + ] + }, + "BlueprintSledConfig": { + "description": "Information about the configuration of a sled as recorded in a blueprint.\n\nPart of [`Blueprint`].", + "type": "object", + "properties": { + "datasets": { + "$ref": "#/components/schemas/IdMapBlueprintDatasetConfig" + }, + "disks": { + "$ref": "#/components/schemas/IdMapBlueprintPhysicalDiskConfig" + }, + "host_phase_2": { + "$ref": "#/components/schemas/BlueprintHostPhase2DesiredSlots" + }, + "remove_mupdate_override": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForMupdateOverrideKind" + } + ] + }, + "sled_agent_generation": { + "description": "Generation number used when this type is converted into an `OmicronSledConfig` for use by sled-agent.\n\nThis field is explicitly named `sled_agent_generation` to indicate that it is only required to cover information that changes what Reconfigurator sends to sled agent. 
For example, changing the sled `state` from `Active` to `Decommissioned` would not require a bump to `sled_agent_generation`, because a `Decommissioned` sled will never be sent an `OmicronSledConfig`.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "state": { + "$ref": "#/components/schemas/SledState" + }, + "zones": { + "$ref": "#/components/schemas/IdMapBlueprintZoneConfig" + } + }, + "required": [ + "datasets", + "disks", + "host_phase_2", + "sled_agent_generation", + "state", + "zones" + ] + }, + "BlueprintSource": { + "description": "Description of the source of a blueprint.", + "oneOf": [ + { + "description": "The initial blueprint created by the rack setup service.", + "type": "object", + "properties": { + "source": { + "type": "string", + "enum": [ + "rss" + ] + } + }, + "required": [ + "source" + ] + }, + { + "description": "A blueprint created by the planner, and we still have the associated planning report.", + "type": "object", + "properties": { + "add": { + "$ref": "#/components/schemas/PlanningAddStepReport" + }, + "cockroachdb_settings": { + "$ref": "#/components/schemas/PlanningCockroachdbSettingsStepReport" + }, + "decommission": { + "$ref": "#/components/schemas/PlanningDecommissionStepReport" + }, + "expunge": { + "$ref": "#/components/schemas/PlanningExpungeStepReport" + }, + "mgs_updates": { + "$ref": "#/components/schemas/PlanningMgsUpdatesStepReport" + }, + "nexus_generation_bump": { + "$ref": "#/components/schemas/PlanningNexusGenerationBumpReport" + }, + "noop_image_source": { + "$ref": "#/components/schemas/PlanningNoopImageSourceStepReport" + }, + "planner_config": { + "description": "The configuration in effect for this planning run.", + "allOf": [ + { + "$ref": "#/components/schemas/PlannerConfig" + } + ] + }, + "source": { + "type": "string", + "enum": [ + "planner" + ] + }, + "zone_updates": { + "$ref": "#/components/schemas/PlanningZoneUpdatesStepReport" + } + }, + "required": [ + "add", + "cockroachdb_settings", + "decommission", + "expunge", + "mgs_updates", + "nexus_generation_bump", + "noop_image_source", + "planner_config", + "source", + "zone_updates" + ] + }, + { + "description": "A blueprint created by the planner but loaded from the database, so we no longer have the associated planning report.", + "type": "object", + "properties": { + "source": { + "type": "string", + "enum": [ + "planner_loaded_from_database" + ] + } + }, + "required": [ + "source" + ] + }, + { + "description": "This blueprint was created by one of `reconfigurator-cli`'s blueprint editing subcommands.", + "type": "object", + "properties": { + "source": { + "type": "string", + "enum": [ + "reconfigurator_cli_edit" + ] + } + }, + "required": [ + "source" + ] + }, + { + "description": "This blueprint was constructed by hand by an automated test.", + "type": "object", + "properties": { + "source": { + "type": "string", + "enum": [ + "test" + ] + } + }, + "required": [ + "source" + ] + } + ] + }, + "BlueprintTarget": { + "description": "Describes what blueprint, if any, the system is currently working toward", + "type": "object", + "properties": { + "enabled": { + "description": "policy: should the system actively work towards this blueprint\n\nThis should generally be left enabled.", + "type": "boolean" + }, + "target_id": { + "description": "id of the blueprint that the system is trying to make real", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "time_made_target": { + "description": "when this blueprint was 
made the target", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "enabled", + "target_id", + "time_made_target" + ] + }, + "BlueprintTargetSet": { + "description": "Specifies what blueprint, if any, the system should be working toward", + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "target_id": { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + }, + "required": [ + "enabled", + "target_id" + ] + }, + "BlueprintZoneConfig": { + "description": "Describes one Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintSledConfig`].", + "type": "object", + "properties": { + "disposition": { + "description": "The disposition (desired state) of this zone recorded in the blueprint.", + "allOf": [ + { + "$ref": "#/components/schemas/BlueprintZoneDisposition" + } + ] + }, + "filesystem_pool": { + "description": "zpool used for the zone's (transient) root filesystem", + "allOf": [ + { + "$ref": "#/components/schemas/ZpoolName" + } + ] + }, + "id": { + "$ref": "#/components/schemas/TypedUuidForOmicronZoneKind" + }, + "image_source": { + "$ref": "#/components/schemas/BlueprintZoneImageSource" + }, + "zone_type": { + "$ref": "#/components/schemas/BlueprintZoneType" + } + }, + "required": [ + "disposition", + "filesystem_pool", + "id", + "image_source", + "zone_type" + ] + }, + "BlueprintZoneDisposition": { + "description": "The desired state of an Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZoneConfig`].", + "oneOf": [ + { + "description": "The zone is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "description": "The zone is permanently gone.", + "type": "object", + "properties": { + "as_of_generation": { + "description": "Generation of the parent config in which this zone became expunged.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + }, + "ready_for_cleanup": { + "description": "True if Reconfiguration knows that this zone has been shut down and will not be restarted.\n\nIn the current implementation, this means the planner has observed an inventory collection where the sled on which this zone was running (a) is no longer running the zone and (b) has a config generation at least as high as `as_of_generation`, indicating it will not try to start the zone on a cold boot based on an older config.", + "type": "boolean" + } + }, + "required": [ + "as_of_generation", + "kind", + "ready_for_cleanup" + ] + } + ] + }, + "BlueprintZoneImageSource": { + "description": "Where the zone's image source is located.\n\nThis is the blueprint version of [`OmicronZoneImageSource`].", + "oneOf": [ + { + "description": "This zone's image source is whatever happens to be on the sled's \"install\" dataset.\n\nThis is whatever was put in place at the factory or by the latest MUPdate. The image used here can vary by sled and even over time (if the sled gets MUPdated again).\n\nHistorically, this was the only source for zone images. 
In a system with automated control-plane-driven update we expect to only use this variant in emergencies where the system had to be recovered via MUPdate.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "install_dataset" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "This zone's image source is the artifact matching this hash from the TUF artifact store (aka \"TUF repo depot\").\n\nThis originates from TUF repos uploaded to Nexus which are then replicated out to all sleds.", + "type": "object", + "properties": { + "hash": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "type": { + "type": "string", + "enum": [ + "artifact" + ] + }, + "version": { + "$ref": "#/components/schemas/BlueprintArtifactVersion" + } + }, + "required": [ + "hash", + "type", + "version" + ] + } + ] + }, + "BlueprintZoneType": { + "oneOf": [ + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "external_ip": { + "$ref": "#/components/schemas/OmicronZoneExternalSnatIp" + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "boundary_ntp" + ] + } + }, + "required": [ + "address", + "dns_servers", + "external_ip", + "nic", + "ntp_servers", + "type" + ] + }, + { + "description": "Used in single-node clickhouse setups", + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "description": "Used in replicated clickhouse setups", + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_server" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "crucible_pantry" + ] + } + }, + "required": [ + "address", + "type" + ] + }, + { + "type": "object", + "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + 
"description": "The address at which the external DNS server is reachable.", + "allOf": [ + { + "$ref": "#/components/schemas/OmicronZoneExternalFloatingAddr" + } + ] + }, + "http_address": { + "description": "The address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + }, + "required": [ + "dataset", + "dns_address", + "http_address", + "nic", + "type" + ] + }, + { + "type": "object", + "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "http_address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + }, + "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_ntp" + ] + } + }, + "required": [ + "address", + "type" + ] + }, + { + "type": "object", + "properties": { + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "external_ip": { + "description": "The address at which the external nexus server is reachable.", + "allOf": [ + { + "$ref": "#/components/schemas/OmicronZoneExternalFloatingIp" + } + ] + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" + }, + "lockstep_port": { + "description": "The port at which the lockstep server is reachable. This shares the same IP address with `internal_address`.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "nexus_generation": { + "description": "Generation number for this Nexus zone. This is used to coordinate handoff between old and new Nexus instances during updates. 
See RFD 588.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "nexus" + ] + } + }, + "required": [ + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "lockstep_port", + "nexus_generation", + "nic", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "oximeter" + ] + } + }, + "required": [ + "address", + "type" + ] + } + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "ClickhouseClusterConfig": { + "description": "Global configuration for all clickhouse servers (replicas) and keepers", + "type": "object", + "properties": { + "cluster_name": { + "description": "An arbitrary name for the Clickhouse cluster shared by all nodes", + "type": "string" + }, + "cluster_secret": { + "description": "An arbitrary string shared by all nodes used at runtime to determine whether nodes are part of the same cluster.", + "type": "string" + }, + "generation": { + "description": "The last update to the clickhouse cluster configuration\n\nThis is used by `clickhouse-admin` in the clickhouse server and keeper zones to discard old configurations.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "highest_seen_keeper_leader_committed_log_index": { + "description": "This is used as a marker to tell if the raft configuration in a new inventory collection is newer than the last collection. This serves as a surrogate for the log index of the last committed configuration, which clickhouse keeper doesn't expose.\n\nThis is necesssary because during inventory collection we poll multiple keeper nodes, and each returns their local knowledge of the configuration. But we may reach different nodes in different attempts, and some nodes in a following attempt may reflect stale configuration. Due to timing, we can always query old information. That is just normal polling. However, we never want to use old configuration if we have already seen and acted on newer configuration.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "keepers": { + "description": "The desired state of the clickhouse keeper cluster\n\nWe decouple deployment of zones that should contain clickhouse keeper processes from actually starting or stopping those processes, adding or removing them to/from the keeper cluster, and reconfiguring other keeper and clickhouse server nodes to reflect the new configuration.\n\nAs part of this decoupling, we keep track of the intended zone deployment in the blueprint, but that is not enough to track the desired state of the keeper cluster. We are only allowed to add or remove one keeper node at a time, and therefore we must track the desired state of the keeper cluster which may change multiple times until the keepers in the cluster match the deployed zones. An example may help:\n\n1. We start with 3 keeper nodes in 3 deployed keeper zones and need to add two to reach our desired policy of 5 keepers 2. The planner adds 2 new keeper zones to the blueprint 3. The planner will also add **one** new keeper to the `keepers` field below that matches one of the deployed zones. 4. 
The executor will start the new keeper process that was added to the `keepers` field, attempt to add it to the keeper cluster by pushing configuration updates to the other keepers, and then updating the clickhouse server configurations to know about the new keeper. 5. If the keeper is successfully added, as reflected in inventory, then steps 3 and 4 above will be repeated for the next keeper process. 6. If the keeper is not successfully added by the executor it will continue to retry indefinitely. 7. If the zone is expunged while the planner has it as part of its desired state in `keepers`, and the executor is trying to add it, the keeper will be removed from `keepers` in the next blueprint. If it has been added to the actual cluster by an executor in the meantime it will be removed on the next iteration of an executor.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/KeeperId" + } + }, + "max_used_keeper_id": { + "description": "Clickhouse Keeper IDs must be unique and are handed out monotonically. Keep track of the last used one.", + "allOf": [ + { + "$ref": "#/components/schemas/KeeperId" + } + ] + }, + "max_used_server_id": { + "description": "Clickhouse Server IDs must be unique and are handed out monotonically. Keep track of the last used one.", + "allOf": [ + { + "$ref": "#/components/schemas/ServerId" + } + ] + }, + "servers": { + "description": "The desired state of clickhouse server processes on the rack\n\nClickhouse servers do not have the same limitations as keepers and can be deployed all at once.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ServerId" + } + } + }, + "required": [ + "cluster_name", + "cluster_secret", + "generation", + "highest_seen_keeper_leader_committed_log_index", + "keepers", + "max_used_keeper_id", + "max_used_server_id", + "servers" + ] + }, + "ClickhouseMode": { + "description": "How to deploy clickhouse nodes", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "single_node_only" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cluster_only" + ] + }, + "value": { + "type": "object", + "properties": { + "target_keepers": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "target_servers": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "target_keepers", + "target_servers" + ] + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "both" + ] + }, + "value": { + "type": "object", + "properties": { + "target_keepers": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "target_servers": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "target_keepers", + "target_servers" + ] + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "ClickhousePolicy": { + "type": "object", + "properties": { + "mode": { + "$ref": "#/components/schemas/ClickhouseMode" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "version": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "mode", + "time_created", + "version" + ] + }, + "CockroachDbClusterVersion": { + "description": "CockroachDB cluster versions we are aware of.\n\nCockroachDB can be upgraded from one major version to the next, e.g. v22.1 -> v22.2. 
Each major version introduces changes in how it stores data on disk to support new features, and each major version has support for reading the previous version's data so that it can perform an upgrade. The version of the data format is called the \"cluster version\", which is distinct from but related to the software version that's being run.\n\nWhile software version v22.2 is using cluster version v22.1, it's possible to downgrade back to v22.1. Once the cluster version is upgraded, there's no going back.\n\nTo give us some time to evaluate new versions of the software while retaining a downgrade path, we currently deploy new versions of CockroachDB across two releases of the Oxide software, in a \"tick-tock\" model:\n\n- In \"tick\" releases, we upgrade the version of the CockroachDB software to a new major version, and update `CockroachDbClusterVersion::NEWLY_INITIALIZED`. On upgraded racks, the new version is running with the previous cluster version; on newly-initialized racks, the new version is running with the new cluster version. - In \"tock\" releases, we change `CockroachDbClusterVersion::POLICY` to the major version we upgraded to in the last \"tick\" release. This results in a new blueprint that upgrades the cluster version, destroying the downgrade path but allowing us to eventually upgrade to the next release.\n\nThese presently describe major versions of CockroachDB. These must be maintained in the correct order (the first variant must be the earliest version).", + "type": "string", + "enum": [ + "V22_1" + ] + }, + "CockroachDbPreserveDowngrade": { + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to.", + "oneOf": [ + { + "description": "Do not modify the setting.", + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "do_not_modify" + ] + } + }, + "required": [ + "action" + ] + }, + { + "description": "Ensure the setting is set to an empty string.", + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "allow_upgrade" + ] + } + }, + "required": [ + "action" + ] + }, + { + "description": "Ensure the setting is set to a given cluster version.", + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": [ + "set" + ] + }, + "data": { + "$ref": "#/components/schemas/CockroachDbClusterVersion" + } + }, + "required": [ + "action", + "data" + ] + } + ] + }, + "CockroachdbUnsafeToShutdown": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_live_nodes_stat" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_underreplicated_stat" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "live_nodes": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "not_enough_live_nodes" + ] + } + }, + "required": [ + "live_nodes", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "not_enough_nodes" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "n": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "underreplicated_ranges" + ] + } + }, + "required": [ + "n", + "type" + ] + } + ] + }, + "CompletedAttempt": { + "description": "externally-exposed status for a 
completed attempt", + "type": "object", + "properties": { + "elapsed": { + "$ref": "#/components/schemas/Duration" + }, + "nattempts_done": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "request": { + "$ref": "#/components/schemas/PendingMgsUpdate" + }, + "result": { + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/UpdateCompletedHow" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/UpdateCompletedHow" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + }, + "time_done": { + "type": "string", + "format": "date-time" + }, + "time_started": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "elapsed", + "nattempts_done", + "request", + "result", + "time_done", + "time_started" + ] + }, + "CompressionAlgorithm": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "on" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "off" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "gzip" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "level": { + "$ref": "#/components/schemas/GzipLevel" + }, + "type": { + "type": "string", + "enum": [ + "gzip_n" + ] + } + }, + "required": [ + "level", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lz4" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "lzjb" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zle" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "CurrentStatus": { + "description": "Describes the current status of a background task", + "oneOf": [ + { + "description": "The background task is not running\n\nTypically, the task would be waiting for its next activation, which would happen after a timeout or some other event that triggers activation", + "type": "object", + "properties": { + "current_status": { + "type": "string", + "enum": [ + "idle" + ] + } + }, + "required": [ + "current_status" + ] + }, + { + "description": "The background task is currently running\n\nMore precisely, the task has been activated and has not yet finished this activation", + "type": "object", + "properties": { + "current_status": { + "type": "string", + "enum": [ + "running" + ] + }, + "details": { + "$ref": "#/components/schemas/CurrentStatusRunning" + } + }, + "required": [ + "current_status", + "details" + ] + } + ] + }, + "CurrentStatusRunning": { + "type": "object", + "properties": { + "iteration": { + "description": "which iteration this was (counter)", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "reason": { + "description": "what kind of event triggered this activation", + "allOf": [ + { + "$ref": "#/components/schemas/ActivationReason" + } + ] + }, + "start_time": { + "description": "wall-clock time when the current activation started", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "iteration", + 
"reason", + "start_time" + ] + }, + "DatasetKind": { + "description": "The kind of dataset. See the `DatasetKind` enum in omicron-common for possible values.", + "type": "string" + }, + "DemoSaga": { + "description": "Identifies an instance of the demo saga", + "type": "object", + "properties": { + "demo_saga_id": { + "$ref": "#/components/schemas/TypedUuidForDemoSagaKind" + }, + "saga_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "demo_saga_id", + "saga_id" + ] + }, + "DiscretionaryZonePlacement": { + "type": "object", + "properties": { + "kind": { + "type": "string" + }, + "source": { + "type": "string" + } + }, + "required": [ + "kind", + "source" + ] + }, + "DiskIdentity": { + "description": "Uniquely identifies a disk.", + "type": "object", + "properties": { + "model": { + "type": "string" + }, + "serial": { + "type": "string" + }, + "vendor": { + "type": "string" + } + }, + "required": [ + "model", + "serial", + "vendor" + ] + }, + "Duration": { + "type": "object", + "properties": { + "nanos": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "secs": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "nanos", + "secs" + ] + }, + "Error": { + "description": "Error information from a response.", + "type": "object", + "properties": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "request_id": { + "type": "string" + } + }, + "required": [ + "message", + "request_id" + ] + }, + "ExpectedActiveRotSlot": { + "description": "Describes the expected active RoT slot, and the version we expect to find for it", + "type": "object", + "properties": { + "slot": { + "$ref": "#/components/schemas/RotSlot" + }, + "version": { + "$ref": "#/components/schemas/ArtifactVersion" + } + }, + "required": [ + "slot", + "version" + ] + }, + "ExpectedVersion": { + "description": "Describes the version that we expect to find in some firmware slot", + "oneOf": [ + { + "description": "We expect to find _no_ valid caboose in this slot", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "no_valid_version" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "description": "We expect to find the specified version in this slot", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "version" + ] + }, + "version": { + "$ref": "#/components/schemas/ArtifactVersion" + } + }, + "required": [ + "kind", + "version" + ] + } + ] + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "GzipLevel": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "HeldDbClaimInfo": { + "description": "Describes an outstanding database claim (for debugging why quiesce is stuck)", + "type": "object", + "properties": { + "debug": { + "type": "string" + }, + "held_since": { + "type": "string", + "format": "date-time" + }, + "id": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "debug", + "held_since", + "id" + ] + }, + "HostPhase1Status": { + "oneOf": [ + { + "description": "This device has no host phase 1 status because it is not a sled (e.g., it's a PSC or switch).", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "not_a_sled" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "type": "object", + "properties": { + "active_slot": { + "nullable": 
true, + "allOf": [ + { + "$ref": "#/components/schemas/M2Slot" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "sled" + ] + }, + "sled_id": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + ] + }, + "slot_a_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "slot_b_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "kind", + "slot_a_version", + "slot_b_version" + ] + } + ] + }, + "HostPhase2Status": { + "type": "object", + "properties": { + "boot_disk": { + "x-rust-type": { + "crate": "std", + "parameters": [ + { + "$ref": "#/components/schemas/M2Slot" + }, + { + "type": "string" + } + ], + "path": "::std::result::Result", + "version": "*" + }, + "oneOf": [ + { + "type": "object", + "properties": { + "ok": { + "$ref": "#/components/schemas/M2Slot" + } + }, + "required": [ + "ok" + ] + }, + { + "type": "object", + "properties": { + "err": { + "type": "string" + } + }, + "required": [ + "err" + ] + } + ] + }, + "slot_a_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "slot_b_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "boot_disk", + "slot_a_version", + "slot_b_version" + ] + }, + "IdMapBlueprintDatasetConfig": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintDatasetConfig" + } + }, + "IdMapBlueprintPhysicalDiskConfig": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintPhysicalDiskConfig" + } + }, + "IdMapBlueprintZoneConfig": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } + }, + "InProgressUpdateStatus": { + "description": "externally-exposed status for each in-progress update", + "type": "object", + "properties": { + "baseboard_id": { + "$ref": "#/components/schemas/BaseboardId" + }, + "nattempts_done": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "status": { + "$ref": "#/components/schemas/UpdateAttemptStatus" + }, + "time_started": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "baseboard_id", + "nattempts_done", + "status", + "time_started" + ] + }, + "Instance": { + "description": "View of an Instance", + "type": "object", + "properties": { + "auto_restart_cooldown_expiration": { + "nullable": true, + "description": "The time at which the auto-restart cooldown period for this instance completes, permitting it to be automatically restarted again. If the instance enters the `Failed` state, it will not be restarted until after this time.\n\nIf this is not present, then either the instance has never been automatically restarted, or the cooldown period has already expired, allowing the instance to be restarted immediately if it fails.", + "type": "string", + "format": "date-time" + }, + "auto_restart_enabled": { + "description": "`true` if this instance's auto-restart policy will permit the control plane to automatically restart it if it enters the `Failed` state.", + "type": "boolean" + }, + "auto_restart_policy": { + "nullable": true, + "description": "The auto-restart policy configured for this instance, or `null` if no explicit policy has been configured.\n\nThis policy determines whether the instance should be automatically restarted by the control plane on failure. 
If this is `null`, the control plane will use the default policy when determining whether or not to automatically restart this instance, which may or may not allow it to be restarted. The value of the `auto_restart_enabled` field indicates whether the instance will be auto-restarted, based on its current policy or the default if it has no configured policy.", + "allOf": [ + { + "$ref": "#/components/schemas/InstanceAutoRestartPolicy" + } + ] + }, + "boot_disk_id": { + "nullable": true, + "description": "the ID of the disk used to boot this Instance, if a specific one is assigned.", + "type": "string", + "format": "uuid" + }, + "cpu_platform": { + "nullable": true, + "description": "The CPU platform for this instance. If this is `null`, the instance requires no particular CPU platform.", + "allOf": [ + { + "$ref": "#/components/schemas/InstanceCpuPlatform" + } + ] + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "hostname": { + "description": "RFC1035-compliant hostname for the Instance.", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "memory": { + "description": "memory allocated for this Instance", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "ncpus": { + "description": "number of CPUs allocated for this Instance", + "allOf": [ + { + "$ref": "#/components/schemas/InstanceCpuCount" + } + ] + }, + "project_id": { + "description": "id for the project containing this Instance", + "type": "string", + "format": "uuid" + }, + "run_state": { + "$ref": "#/components/schemas/InstanceState" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_last_auto_restarted": { + "nullable": true, + "description": "The timestamp of the most recent time this instance was automatically restarted by the control plane.\n\nIf this is not present, then this instance has not been automatically restarted.", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "time_run_state_updated": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "auto_restart_enabled", + "description", + "hostname", + "id", + "memory", + "name", + "ncpus", + "project_id", + "run_state", + "time_created", + "time_modified", + "time_run_state_updated" + ] + }, + "InstanceAutoRestartPolicy": { + "description": "A policy determining when an instance should be automatically restarted by the control plane.", + "oneOf": [ + { + "description": "The instance should not be automatically restarted by the control plane if it fails.", + "type": "string", + "enum": [ + "never" + ] + }, + { + "description": "If this instance is running and unexpectedly fails (e.g. due to a host software crash or unexpected host reboot), the control plane will make a best-effort attempt to restart it. 
The control plane may choose not to restart the instance to preserve the overall availability of the system.", + "type": "string", + "enum": [ + "best_effort" + ] + } + ] + }, + "InstanceCpuCount": { + "description": "The number of CPUs in an Instance", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "InstanceCpuPlatform": { + "description": "A required CPU platform for an instance.\n\nWhen an instance specifies a required CPU platform:\n\n- The system may expose (to the VM) new CPU features that are only present on that platform (or on newer platforms of the same lineage that also support those features). - The instance must run on hosts that have CPUs that support all the features of the supplied platform.\n\nThat is, the instance is restricted to hosts that have the CPUs which support all features of the required platform, but in exchange the CPU features exposed by the platform are available for the guest to use. Note that this may prevent an instance from starting (if the hosts that could run it are full but there is capacity on other incompatible hosts).\n\nIf an instance does not specify a required CPU platform, then when it starts, the control plane selects a host for the instance and then supplies the guest with the \"minimum\" CPU platform supported by that host. This maximizes the number of hosts that can run the VM if it later needs to migrate to another host.\n\nIn all cases, the CPU features presented by a given CPU platform are a subset of what the corresponding hardware may actually support; features which cannot be used from a virtual environment or do not have full hypervisor support may be masked off. See RFD 314 for specific CPU features in a CPU platform.", + "oneOf": [ + { + "description": "An AMD Milan-like CPU platform.", + "type": "string", + "enum": [ + "amd_milan" + ] + }, + { + "description": "An AMD Turin-like CPU platform.", + "type": "string", + "enum": [ + "amd_turin" + ] + } + ] + }, + "InstanceMigrateRequest": { + "description": "Parameters used when migrating an instance.", + "type": "object", + "properties": { + "dst_sled_id": { + "description": "The ID of the sled to which to migrate the target instance.", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "dst_sled_id" + ] + }, + "InstanceState": { + "description": "Running state of an Instance (primarily: booted or stopped)\n\nThis typically reflects whether it's starting, running, stopping, or stopped, but also includes states related to the Instance's lifecycle", + "oneOf": [ + { + "description": "The instance is being created.", + "type": "string", + "enum": [ + "creating" + ] + }, + { + "description": "The instance is currently starting up.", + "type": "string", + "enum": [ + "starting" + ] + }, + { + "description": "The instance is currently running.", + "type": "string", + "enum": [ + "running" + ] + }, + { + "description": "The instance has been requested to stop and a transition to \"Stopped\" is imminent.", + "type": "string", + "enum": [ + "stopping" + ] + }, + { + "description": "The instance is currently stopped.", + "type": "string", + "enum": [ + "stopped" + ] + }, + { + "description": "The instance is in the process of rebooting - it will remain in the \"rebooting\" state until the VM is starting once more.", + "type": "string", + "enum": [ + "rebooting" + ] + }, + { + "description": "The instance is in the process of migrating - it will remain in the \"migrating\" state until the migration process is complete and the destination propolis is ready to 
continue execution.", + "type": "string", + "enum": [ + "migrating" + ] + }, + { + "description": "The instance is attempting to recover from a failure.", + "type": "string", + "enum": [ + "repairing" + ] + }, + { + "description": "The instance has encountered a failure.", + "type": "string", + "enum": [ + "failed" + ] + }, + { + "description": "The instance has been deleted.", + "type": "string", + "enum": [ + "destroyed" + ] + } + ] + }, + "IpNet": { + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + }, + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] + } + ] + }, + "Ipv4Net": { + "example": "192.168.1.0/24", + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" + }, + "Ipv6Net": { + "example": "fd12:3456::/64", + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + }, + "KeeperId": { + "description": "A unique ID for a ClickHouse Keeper", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "LastResult": { + "oneOf": [ + { + "description": "The task has never completed an activation", + "type": "object", + "properties": { + "last_result": { + "type": "string", + "enum": [ + "never_completed" + ] + } + }, + "required": [ + "last_result" + ] + }, + { + "description": "The task has completed at least one activation", + "type": "object", + "properties": { + "details": { + "$ref": "#/components/schemas/LastResultCompleted" + }, + "last_result": { + "type": "string", + "enum": [ + "completed" + ] + } + }, + "required": [ + "details", + "last_result" + ] + } + ] + }, + "LastResultCompleted": { + "type": "object", + "properties": { + "details": { + "description": "arbitrary datum emitted by the background task" + }, + "elapsed": { + "description": "total time elapsed during the activation", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "iteration": { + "description": "which iteration this was (counter)", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "reason": { + "description": "what kind of event triggered this activation", + "allOf": [ + { + "$ref": "#/components/schemas/ActivationReason" + } + ] + }, + "start_time": { + "description": "wall-clock time when the activation started", + 
"type": "string", + "format": "date-time" + } + }, + "required": [ + "details", + "elapsed", + "iteration", + "reason", + "start_time" + ] + }, + "M2Slot": { + "description": "Describes an M.2 slot, often in the context of writing a system image to it.", + "type": "string", + "enum": [ + "A", + "B" + ] + }, + "MacAddr": { + "example": "ff:ff:ff:ff:ff:ff", + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "type": "string", + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", + "minLength": 5, + "maxLength": 17 + }, + "MgsDrivenUpdateStatus": { + "type": "object", + "properties": { + "baseboard_description": { + "type": "string" + }, + "host_os_phase_1": { + "$ref": "#/components/schemas/HostPhase1Status" + }, + "rot": { + "$ref": "#/components/schemas/RotStatus" + }, + "rot_bootloader": { + "$ref": "#/components/schemas/RotBootloaderStatus" + }, + "sp": { + "$ref": "#/components/schemas/SpStatus" + } + }, + "required": [ + "baseboard_description", + "host_os_phase_1", + "rot", + "rot_bootloader", + "sp" + ] + }, + "MgsUpdateDriverStatus": { + "description": "Status of ongoing update attempts, recently completed attempts, and update requests that are waiting for retry.", + "type": "object", + "properties": { + "in_progress": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/InProgressUpdateStatus" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/InProgressUpdateStatus" + }, + "uniqueItems": true + }, + "recent": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CompletedAttempt" + } + }, + "waiting": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/WaitingStatus" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/WaitingStatus" + }, + "uniqueItems": true + } + }, + "required": [ + "in_progress", + "recent", + "waiting" + ] + }, + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. 
They can be at most 63 characters long.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", + "minLength": 1, + "maxLength": 63 + }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "subnet": { + "$ref": "#/components/schemas/IpNet" + }, + "transit_ips": { + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ] + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + }, + "required": [ + "id", + "type" + ] + } + ] + }, + "NexusGenerationBumpWaitingOn": { + "oneOf": [ + { + "description": "Waiting for the planner to finish updating all non-Nexus zones", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "found_old_non_nexus_zones" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for the planner to deploy new-generation Nexus zones", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_new_nexus_in_blueprint" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for `db_metadata_nexus` records to be deployed for new-generation Nexus zones", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_nexus_database_access_records" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for newly deployed Nexus zones to appear in inventory", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "missing_new_nexus_in_inventory" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "NodeName": { + "description": "Unique name for a saga [`Node`]\n\nEach node requires a string name that's unique within its DAG. The name is used to identify its output. 
Nodes that depend on a given node (either directly or indirectly) can access the node's output using its name.", + "type": "string" + }, + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", + "type": "object", + "properties": { + "pool_name": { + "$ref": "#/components/schemas/ZpoolName" + } + }, + "required": [ + "pool_name" + ] + }, + "OmicronZoneExternalFloatingAddr": { + "description": "Floating external address with port allocated to an Omicron-managed zone.", + "type": "object", + "properties": { + "addr": { + "type": "string" + }, + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" + } + }, + "required": [ + "addr", + "id" + ] + }, + "OmicronZoneExternalFloatingIp": { + "description": "Floating external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" + }, + "ip": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "id", + "ip" + ] + }, + "OmicronZoneExternalSnatIp": { + "description": "SNAT (outbound) external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" + }, + "snat_cfg": { + "$ref": "#/components/schemas/SourceNatConfig" + } + }, + "required": [ + "id", + "snat_cfg" + ] + }, + "OmicronZoneType": { + "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration", + "oneOf": [ + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "snat_cfg": { + "description": "The SNAT configuration for outbound connections.", + "allOf": [ + { + "$ref": "#/components/schemas/SourceNatConfig" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "boundary_ntp" + ] + } + }, + "required": [ + "address", + "dns_servers", + "nic", + "ntp_servers", + "snat_cfg", + "type" + ] + }, + { + "description": "Type of clickhouse zone used for a single node clickhouse deployment", + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "description": "A zone used to run a Clickhouse Keeper node\n\nKeepers are only used in replicated clickhouse setups", + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "description": "A zone used to run a 
Clickhouse Server in a replicated deployment", + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_server" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + }, + "required": [ + "address", + "dataset", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "crucible_pantry" + ] + } + }, + "required": [ + "address", + "type" + ] + }, + { + "type": "object", + "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "type": "string" + }, + "http_address": { + "description": "The address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + }, + "required": [ + "dataset", + "dns_address", + "http_address", + "nic", + "type" + ] + }, + { + "type": "object", + "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sled's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "http_address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + }, + "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_ntp" + ] + } + }, + "required": [ + "address", + "type" + ] + }, + { + "type": "object", + "properties": { + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "external_ip": { + "description": "The address at which the external nexus server is reachable.", + "type": "string", + "format": "ip" + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" 
+ }, + "lockstep_port": { + "description": "The port at which the internal lockstep server is reachable. This shares the same IP address with `internal_address`.", + "default": 12232, + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "nexus" + ] + } + }, + "required": [ + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "nic", + "type" + ] + }, + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "oximeter" + ] + } + }, + "required": [ + "address", + "type" + ] + } + ] + }, + "OximeterReadMode": { + "description": "Where oximeter should read from", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "single_node" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cluster" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "OximeterReadPolicy": { + "type": "object", + "properties": { + "mode": { + "$ref": "#/components/schemas/OximeterReadMode" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "version": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "mode", + "time_created", + "version" + ] + }, + "PendingMgsUpdate": { + "type": "object", + "properties": { + "artifact_hash": { + "description": "which artifact to apply to this device", + "type": "string", + "format": "hex string (32 bytes)" + }, + "artifact_version": { + "$ref": "#/components/schemas/ArtifactVersion" + }, + "baseboard_id": { + "description": "id of the baseboard that we're going to update", + "allOf": [ + { + "$ref": "#/components/schemas/BaseboardId" + } + ] + }, + "details": { + "description": "component-specific details of the pending update", + "allOf": [ + { + "$ref": "#/components/schemas/PendingMgsUpdateDetails" + } + ] + }, + "slot_id": { + "description": "last known MGS slot (cubby number) of the baseboard", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "sp_type": { + "description": "what type of baseboard this is", + "allOf": [ + { + "$ref": "#/components/schemas/SpType" + } + ] + } + }, + "required": [ + "artifact_hash", + "artifact_version", + "baseboard_id", + "details", + "slot_id", + "sp_type" + ] + }, + "PendingMgsUpdateDetails": { + "description": "Describes the component-specific details of a PendingMgsUpdate", + "oneOf": [ + { + "description": "the SP itself is being updated", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "sp" + ] + }, + "expected_active_version": { + "description": "expected contents of the active slot", + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactVersion" + } + ] + }, + "expected_inactive_version": { + "description": "expected contents of the inactive slot", + "allOf": [ + { + "$ref": "#/components/schemas/ExpectedVersion" + } + ] + } + }, + "required": [ + "component", + "expected_active_version", + "expected_inactive_version" + ] + }, + { + "description": "the RoT is being updated", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "rot" + ] + }, + "expected_active_slot": { + "$ref": "#/components/schemas/ExpectedActiveRotSlot" + }, + 
"expected_inactive_version": { + "$ref": "#/components/schemas/ExpectedVersion" + }, + "expected_pending_persistent_boot_preference": { + "nullable": true, + "description": "the persistent boot preference written into the CFPA scratch page that will become the persistent boot preference in the authoritative CFPA page upon reboot, unless CFPA update of the authoritative page fails for some reason.", + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } + ] + }, + "expected_persistent_boot_preference": { + "description": "the persistent boot preference written into the current authoritative CFPA page (ping or pong)", + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } + ] + }, + "expected_transient_boot_preference": { + "nullable": true, + "description": "override persistent preference selection for a single boot", + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } + ] + } + }, + "required": [ + "component", + "expected_active_slot", + "expected_inactive_version", + "expected_persistent_boot_preference" + ] + }, + { + "description": "the RoT bootloader is being updated", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "rot_bootloader" + ] + }, + "expected_stage0_next_version": { + "description": "expected contents of the stage 0 next", + "allOf": [ + { + "$ref": "#/components/schemas/ExpectedVersion" + } + ] + }, + "expected_stage0_version": { + "description": "expected contents of the stage 0", + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactVersion" + } + ] + } + }, + "required": [ + "component", + "expected_stage0_next_version", + "expected_stage0_version" + ] + }, + { + "description": "the host OS is being updated\n\nWe write the phase 1 via MGS, and have a precheck condition that sled-agent has already written the matching phase 2.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "host_phase1" + ] + }, + "expected_active_phase_1_hash": { + "description": "The hash of the phase 1 slot specified by `expected_active_phase_1_hash`.\n\nWe should always be able to fetch this. Even if the phase 1 contents themselves have been corrupted (very scary for the active slot!), the SP can still hash those contents.", + "type": "string", + "format": "hex string (32 bytes)" + }, + "expected_active_phase_1_slot": { + "description": "Which slot is currently active according to the SP.\n\nThis controls which slot will be used the next time the sled boots; it will _usually_ match `boot_disk`, but differs in the window of time between telling the SP to change which slot to use and the host OS rebooting to actually use that slot.", + "allOf": [ + { + "$ref": "#/components/schemas/M2Slot" + } + ] + }, + "expected_active_phase_2_hash": { + "description": "The hash of the currently-active phase 2 artifact.\n\nIt's possible sled-agent won't be able to report this value, but that would indicate that we don't know the version currently running. 
The planner wouldn't stage an update without knowing the current version, so if something has gone wrong in the meantime we won't proceed either.", + "type": "string", + "format": "hex string (32 bytes)" + }, + "expected_boot_disk": { + "description": "Which slot the host OS most recently booted from.", + "allOf": [ + { + "$ref": "#/components/schemas/M2Slot" + } + ] + }, + "expected_inactive_phase_1_hash": { + "description": "The hash of the phase 1 slot specified by toggling `expected_active_phase_1_slot` to the other slot.\n\nWe should always be able to fetch this. Even if the phase 1 contents of the inactive slot are entirely bogus, the SP can still hash those contents.", + "type": "string", + "format": "hex string (32 bytes)" + }, + "expected_inactive_phase_2_hash": { + "description": "The hash of the currently-inactive phase 2 artifact.\n\nIt's entirely possible that a sled needing a host OS update has no valid artifact in its inactive slot. However, a precondition for us performing a phase 1 update is that `sled-agent` on the target sled has already written the paired phase 2 artifact to the inactive slot; therefore, we don't need to be able to represent an invalid inactive slot.", + "type": "string", + "format": "hex string (32 bytes)" + }, + "sled_agent_address": { + "description": "Address for contacting sled-agent to check phase 2 contents.", + "type": "string" + } + }, + "required": [ + "component", + "expected_active_phase_1_hash", + "expected_active_phase_1_slot", + "expected_active_phase_2_hash", + "expected_boot_disk", + "expected_inactive_phase_1_hash", + "expected_inactive_phase_2_hash", + "sled_agent_address" + ] + } + ] + }, + "PendingMgsUpdates": { + "type": "object", + "properties": { + "by_baseboard": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/PendingMgsUpdate" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/PendingMgsUpdate" + }, + "uniqueItems": true + } + }, + "required": [ + "by_baseboard" + ] + }, + "PendingRecovery": { + "description": "Snapshot of reassignment state when a recovery pass started", + "type": "object", + "properties": { + "blueprint_id": { + "nullable": true, + "description": "which blueprint id we'd be fully caught up to upon completion", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "generation": { + "description": "what `reassignment_generation` was when this recovery started", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + } + }, + "required": [ + "generation" + ] + }, + "PendingSagaInfo": { + "description": "Describes a pending saga (for debugging why quiesce is stuck)", + "type": "object", + "properties": { + "recovered": { + "description": "If true, we know the saga needs to be recovered. It may or may not be running already.\n\nIf false, this saga was created in this Nexus process's lifetime. 
It's still running.", + "type": "boolean" + }, + "saga_id": { + "type": "string", + "format": "uuid" + }, + "saga_name": { + "type": "string" + }, + "time_pending": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "recovered", + "saga_id", + "saga_name", + "time_pending" + ] + }, + "PhysicalDiskPath": { + "type": "object", + "properties": { + "disk_id": { + "description": "ID of the physical disk", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "disk_id" + ] + }, + "Ping": { + "type": "object", + "properties": { + "status": { + "description": "Whether the external API is reachable. Will always be Ok if the endpoint returns anything at all.", + "allOf": [ + { + "$ref": "#/components/schemas/PingStatus" + } + ] + } + }, + "required": [ + "status" + ] + }, + "PingStatus": { + "type": "string", + "enum": [ + "ok" + ] + }, + "PlannerConfig": { + "type": "object", + "properties": { + "add_zones_with_mupdate_override": { + "description": "Whether to add zones even if a mupdate override is present.\n\nOnce Nexus-driven update is active on a customer system, we must not add new zones while the system is recovering from a MUPdate.\n\nThis setting, which is off by default, allows us to add zones even if we've detected a recent MUPdate on the system.", + "type": "boolean" + } + }, + "required": [ + "add_zones_with_mupdate_override" + ] + }, + "PlanningAddOutOfEligibleSleds": { + "description": "How many discretionary zones we actually placed out of how many we wanted to place.", + "type": "object", + "properties": { + "placed": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "wanted_to_place": { + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "placed", + "wanted_to_place" + ] + }, + "PlanningAddStepReport": { + "type": "object", + "properties": { + "add_update_blocked_reasons": { + "description": "Reasons why zone adds and any updates are blocked.\n\nThis is typically a list of MUPdate-related reasons.", + "type": "array", + "items": { + "type": "string" + } + }, + "add_zones_with_mupdate_override": { + "description": "The value of the homonymous planner config. 
(What this really means is that zone adds happen despite being blocked by one or more MUPdate-related reasons.)", + "type": "boolean" + }, + "discretionary_zones_placed": { + "description": "Sled ID → kinds of discretionary zones placed there", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DiscretionaryZonePlacement" + } + } + }, + "out_of_eligible_sleds": { + "description": "Discretionary zone kind → (placed, wanted to place)", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningAddOutOfEligibleSleds" + } + }, + "sleds_getting_ntp_and_discretionary_zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true + }, + "sleds_missing_crucible_zone": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + } + }, + "sleds_missing_ntp_zone": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true + }, + "sleds_waiting_for_ntp_zone": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true + }, + "sleds_without_ntp_zones_in_inventory": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true + }, + "sleds_without_zpools_for_ntp_zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "uniqueItems": true + }, + "sufficient_zones_exist": { + "description": "Discretionary zone kind → (wanted to place, num existing)", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningAddSufficientZonesExist" + } + }, + "waiting_on": { + "nullable": true, + "description": "What are we waiting on to start zone additions?", + "allOf": [ + { + "$ref": "#/components/schemas/ZoneAddWaitingOn" + } + ] + } + }, + "required": [ + "add_update_blocked_reasons", + "add_zones_with_mupdate_override", + "discretionary_zones_placed", + "out_of_eligible_sleds", + "sleds_getting_ntp_and_discretionary_zones", + "sleds_missing_crucible_zone", + "sleds_missing_ntp_zone", + "sleds_waiting_for_ntp_zone", + "sleds_without_ntp_zones_in_inventory", + "sleds_without_zpools_for_ntp_zones", + "sufficient_zones_exist" + ] + }, + "PlanningAddSufficientZonesExist": { + "description": "We have at least the minimum required number of zones of a given kind.", + "type": "object", + "properties": { + "num_existing": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "target_count": { + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "num_existing", + "target_count" + ] + }, + "PlanningCockroachdbSettingsStepReport": { + "type": "object", + "properties": { + "preserve_downgrade": { + "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" + } + }, + "required": [ + "preserve_downgrade" + ] + }, + "PlanningDecommissionStepReport": { + "type": "object", + "properties": { + "zombie_sleds": { + "description": "Decommissioned sleds that unexpectedly appeared as commissioned.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + } + }, + "required": [ + "zombie_sleds" + ] + }, + "PlanningExpungeStepReport": { + "type": "object", + "properties": { + "orphan_disks": { + "description": "Expunged disks not present in the parent blueprint.", + "type": "object", + 
"additionalProperties": { + "$ref": "#/components/schemas/TypedUuidForPhysicalDiskKind" + } + } + }, + "required": [ + "orphan_disks" + ] + }, + "PlanningMgsUpdatesStepReport": { + "type": "object", + "properties": { + "pending_mgs_updates": { + "$ref": "#/components/schemas/PendingMgsUpdates" + } + }, + "required": [ + "pending_mgs_updates" + ] + }, + "PlanningNexusGenerationBumpReport": { + "oneOf": [ + { + "description": "We have no reason to bump the Nexus generation number.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "nothing_to_report" + ] + } + }, + "required": [ + "component" + ] + }, + { + "description": "We are waiting on some condition before we can bump the Nexus generation.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "waiting_on" + ] + }, + "value": { + "$ref": "#/components/schemas/NexusGenerationBumpWaitingOn" + } + }, + "required": [ + "component", + "value" + ] + }, + { + "description": "We are bumping the Nexus generation number to this value.", + "type": "object", + "properties": { + "component": { + "type": "string", + "enum": [ + "bumping_generation" + ] + }, + "value": { + "$ref": "#/components/schemas/Generation" + } + }, + "required": [ + "component", + "value" + ] + } + ] + }, + "PlanningNoopImageSourceConverted": { + "description": "How many of the total install-dataset zones and/or host phase 2 slots were noop-converted to use the artifact store on a particular sled.", + "type": "object", + "properties": { + "host_phase_2_slot_a_eligible": { + "type": "boolean" + }, + "host_phase_2_slot_b_eligible": { + "type": "boolean" + }, + "num_dataset": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "num_eligible": { + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "host_phase_2_slot_a_eligible", + "host_phase_2_slot_b_eligible", + "num_dataset", + "num_eligible" + ] + }, + "PlanningNoopImageSourceSkipSledHostPhase2Reason": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "both_slots_already_artifact" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "sled_not_in_inventory" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "PlanningNoopImageSourceSkipSledZonesReason": { + "oneOf": [ + { + "type": "object", + "properties": { + "num_total": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "all_zones_already_artifact" + ] + } + }, + "required": [ + "num_total", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "sled_not_in_inventory" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "error_retrieving_zone_manifest" + ] + } + }, + "required": [ + "error", + "type" + ] + }, + { + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForMupdateOverrideKind" + }, + "type": { + "type": "string", + "enum": [ + "remove_mupdate_override" + ] + } + }, + "required": [ + "id", + "type" + ] + } + ] + }, + "PlanningNoopImageSourceSkipZoneReason": { + "oneOf": [ + { + "type": "object", + "properties": { + "file_name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "zone_not_in_manifest" + ] + }, + "zone_kind": { + 
"type": "string" + } + }, + "required": [ + "file_name", + "type", + "zone_kind" + ] + }, + { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "file_name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "invalid_artifact" + ] + }, + "zone_kind": { + "type": "string" + } + }, + "required": [ + "error", + "file_name", + "type", + "zone_kind" + ] + }, + { + "type": "object", + "properties": { + "artifact_hash": { + "type": "string", + "format": "hex string (32 bytes)" + }, + "file_name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "artifact_not_in_repo" + ] + }, + "zone_kind": { + "type": "string" + } + }, + "required": [ + "artifact_hash", + "file_name", + "type", + "zone_kind" + ] + } + ] + }, + "PlanningNoopImageSourceStepReport": { + "type": "object", + "properties": { + "converted": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceConverted" + } + }, + "no_target_release": { + "type": "boolean" + }, + "skipped_sled_host_phase_2": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceSkipSledHostPhase2Reason" + } + }, + "skipped_sled_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceSkipSledZonesReason" + } + }, + "skipped_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/PlanningNoopImageSourceSkipZoneReason" + } + } + }, + "required": [ + "converted", + "no_target_release", + "skipped_sled_host_phase_2", + "skipped_sled_zones", + "skipped_zones" + ] + }, + "PlanningOutOfDateZone": { + "description": "We have at least the minimum required number of zones of a given kind.", + "type": "object", + "properties": { + "desired_image_source": { + "$ref": "#/components/schemas/BlueprintZoneImageSource" + }, + "zone_config": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } + }, + "required": [ + "desired_image_source", + "zone_config" + ] + }, + "PlanningZoneUpdatesStepReport": { + "type": "object", + "properties": { + "expunged_zones": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } + } + }, + "out_of_date_zones": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PlanningOutOfDateZone" + } + } + }, + "unsafe_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ZoneUnsafeToShutdown" + } + }, + "updated_zones": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } + } + }, + "waiting_on": { + "nullable": true, + "description": "What are we waiting on to start zone updates?", + "allOf": [ + { + "$ref": "#/components/schemas/ZoneUpdatesWaitingOn" + } + ] + }, + "waiting_zones": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ZoneWaitingToExpunge" + } + } + }, + "required": [ + "expunged_zones", + "out_of_date_zones", + "unsafe_zones", + "updated_zones", + "waiting_zones" + ] + }, + "QuiesceState": { + "description": "See [`QuiesceStatus`] for more on Nexus quiescing.\n\nAt any given time, Nexus is always in one of these states:\n\n```text Undetermined (have not loaded persistent state; don't know yet) | | load persistent state and find we're not quiescing v Running (normal operation) | | quiesce 
starts v DrainingSagas (no new sagas are allowed, but some are still running) | | no more sagas running v DrainingDb (no sagas running; no new db connections may be | acquired by Nexus at-large, but some are still held) | | no more database connections held v RecordingQuiesce (everything is quiesced aside from one connection being | used to record our final quiesced state) | | finish recording quiesce state in database v Quiesced (no sagas running, no database connections in use) ```\n\nQuiescing is (currently) a one-way trip: once a Nexus process starts quiescing, it will never go back to normal operation. It will never go back to an earlier stage, either.", + "oneOf": [ + { + "description": "We have not yet determined based on persistent state if we're supposed to be quiesced or not", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "undetermined" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Normal operation", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "running" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "New sagas disallowed, but some are still running on some Nexus instances", + "type": "object", + "properties": { + "quiesce_details": { + "type": "object", + "properties": { + "time_requested": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "time_requested" + ] + }, + "state": { + "type": "string", + "enum": [ + "draining_sagas" + ] + } + }, + "required": [ + "quiesce_details", + "state" + ] + }, + { + "description": "No sagas running on any Nexus instances\n\nNo new database connections may be claimed, but some database connections are still held.", + "type": "object", + "properties": { + "quiesce_details": { + "type": "object", + "properties": { + "duration_draining_sagas": { + "$ref": "#/components/schemas/Duration" + }, + "time_requested": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "duration_draining_sagas", + "time_requested" + ] + }, + "state": { + "type": "string", + "enum": [ + "draining_db" + ] + } + }, + "required": [ + "quiesce_details", + "state" + ] + }, + { + "description": "No database connections in use except to record the final \"quiesced\" state", + "type": "object", + "properties": { + "quiesce_details": { + "type": "object", + "properties": { + "duration_draining_db": { + "$ref": "#/components/schemas/Duration" + }, + "duration_draining_sagas": { + "$ref": "#/components/schemas/Duration" + }, + "time_requested": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "duration_draining_db", + "duration_draining_sagas", + "time_requested" + ] + }, + "state": { + "type": "string", + "enum": [ + "recording_quiesce" + ] + } + }, + "required": [ + "quiesce_details", + "state" + ] + }, + { + "description": "Nexus has no sagas running and is not using the database", + "type": "object", + "properties": { + "quiesce_details": { + "type": "object", + "properties": { + "duration_draining_db": { + "$ref": "#/components/schemas/Duration" + }, + "duration_draining_sagas": { + "$ref": "#/components/schemas/Duration" + }, + "duration_recording_quiesce": { + "$ref": "#/components/schemas/Duration" + }, + "duration_total": { + "$ref": "#/components/schemas/Duration" + }, + "time_quiesced": { + "type": "string", + "format": "date-time" + }, + "time_requested": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "duration_draining_db", + "duration_draining_sagas", + 
"duration_recording_quiesce", + "duration_total", + "time_quiesced", + "time_requested" + ] + }, + "state": { + "type": "string", + "enum": [ + "quiesced" + ] + } + }, + "required": [ + "quiesce_details", + "state" + ] + } + ] + }, + "QuiesceStatus": { + "description": "Describes whether Nexus is quiescing or quiesced and what, if anything, is blocking the quiesce process\n\n**Quiescing** is the process of draining Nexus of running sagas and stopping all use of the database in preparation for upgrade. See [`QuiesceState`] for more on the stages involved.", + "type": "object", + "properties": { + "db_claims": { + "title": "IdOrdMap", + "description": "what database claims are currently held (by any part of Nexus)\n\nEntries here prevent transitioning from `WaitingForDb` to `Quiesced`.", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/HeldDbClaimInfo" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/HeldDbClaimInfo" + }, + "uniqueItems": true + }, + "sagas": { + "description": "information about saga quiescing", + "allOf": [ + { + "$ref": "#/components/schemas/SagaQuiesceStatus" + } + ] + }, + "state": { + "description": "what stage of quiescing is Nexus at", + "allOf": [ + { + "$ref": "#/components/schemas/QuiesceState" + } + ] + } + }, + "required": [ + "db_claims", + "sagas", + "state" + ] + }, + "ReconfiguratorConfig": { + "type": "object", + "properties": { + "planner_config": { + "$ref": "#/components/schemas/PlannerConfig" + }, + "planner_enabled": { + "type": "boolean" + } + }, + "required": [ + "planner_config", + "planner_enabled" + ] + }, + "ReconfiguratorConfigParam": { + "type": "object", + "properties": { + "config": { + "$ref": "#/components/schemas/ReconfiguratorConfig" + }, + "version": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "config", + "version" + ] + }, + "ReconfiguratorConfigView": { + "type": "object", + "properties": { + "config": { + "$ref": "#/components/schemas/ReconfiguratorConfig" + }, + "time_modified": { + "type": "string", + "format": "date-time" + }, + "version": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "config", + "time_modified", + "version" + ] + }, + "RotBootloaderStatus": { + "type": "object", + "properties": { + "stage0_next_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "stage0_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "stage0_next_version", + "stage0_version" + ] + }, + "RotSlot": { + "oneOf": [ + { + "type": "object", + "properties": { + "slot": { + "type": "string", + "enum": [ + "a" + ] + } + }, + "required": [ + "slot" + ] + }, + { + "type": "object", + "properties": { + "slot": { + "type": "string", + "enum": [ + "b" + ] + } + }, + "required": [ + "slot" + ] + } + ] + }, + "RotStatus": { + "type": "object", + "properties": { + "active_slot": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/RotSlot" + } + ] + }, + "slot_a_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "slot_b_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "slot_a_version", + "slot_b_version" + ] + }, + "Saga": { + "description": "Sagas\n\nThese are currently only intended for observability by developers. 
We will eventually want to flesh this out into something more observable for end users.", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "state": { + "$ref": "#/components/schemas/SagaState" + } + }, + "required": [ + "id", + "state" + ] + }, + "SagaErrorInfo": { + "oneOf": [ + { + "type": "object", + "properties": { + "error": { + "type": "string", + "enum": [ + "action_failed" + ] + }, + "source_error": {} + }, + "required": [ + "error", + "source_error" + ] + }, + { + "type": "object", + "properties": { + "error": { + "type": "string", + "enum": [ + "deserialize_failed" + ] + }, + "message": { + "type": "string" + } + }, + "required": [ + "error", + "message" + ] + }, + { + "type": "object", + "properties": { + "error": { + "type": "string", + "enum": [ + "injected_error" + ] + } + }, + "required": [ + "error" + ] + }, + { + "type": "object", + "properties": { + "error": { + "type": "string", + "enum": [ + "serialize_failed" + ] + }, + "message": { + "type": "string" + } + }, + "required": [ + "error", + "message" + ] + }, + { + "type": "object", + "properties": { + "error": { + "type": "string", + "enum": [ + "subsaga_create_failed" + ] + }, + "message": { + "type": "string" + } + }, + "required": [ + "error", + "message" + ] + } + ] + }, + "SagaQuiesceStatus": { + "type": "object", + "properties": { + "drained_blueprint_id": { + "nullable": true, + "description": "blueprint id that we're \"fully drained up to\"\n\nIf this value is non-`None`, that means that:\n\n- saga creation is disallowed - no sagas are running - we have re-assigned sagas from other Nexus instances expunged in this blueprint or earlier - we have finished recovery for all those sagas (that had been assigned to us as of the re-assignment pass for this blueprint id)\n\nThis means that the only way we can wind up running another saga is if there's a new blueprint that expunges a different Nexus zone.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "first_recovery_complete": { + "description": "whether at least one recovery pass has successfully completed\n\nWe have to track this because we can't quiesce until we know we've recovered all outstanding sagas.", + "type": "boolean" + }, + "new_sagas_allowed": { + "description": "current policy: are we allowed to *create* new sagas?\n\nThis also affects re-assigning sagas from expunged Nexus instances to ourselves. It does **not** affect saga recovery.", + "allOf": [ + { + "$ref": "#/components/schemas/SagasAllowed" + } + ] + }, + "reassignment_blueprint_id": { + "nullable": true, + "description": "blueprint id associated with last successful saga reassignment\n\nSimilar to the generation number, this is used to track whether we've accounted for all sagas for all expungements up through this target blueprint.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "reassignment_generation": { + "description": "generation number for the saga reassignment\n\nThis gets bumped whenever a saga reassignment operation completes that may have re-assigned us some sagas. 
It's used to keep track of when we've recovered all sagas that could be assigned to us.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "reassignment_pending": { + "description": "whether there is a saga reassignment operation happening\n\nThese operations may assign new sagas to Nexus that must be recovered and completed before quiescing can finish.", + "type": "boolean" + }, + "recovered_blueprint_id": { + "nullable": true, + "description": "blueprint id that saga recovery has \"caught up to\"\n\nThis means that we have finished recovering any sagas that were re-assigned to us due to expungements of other Nexus zones up through this blueprint. Put differently: we know that we will never be assigned more sagas due to expungement unless the target blueprint changes past this one.\n\nThis does not mean that we've fully drained all sagas up through this blueprint. There may still be sagas running.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForBlueprintKind" + } + ] + }, + "recovered_reassignment_generation": { + "description": "\"saga reassignment generation number\" that was \"caught up to\" by the last recovery pass\n\nThis is used with `reassignment_generation` to help us know when we've recovered all the sagas that may have been assigned to us during a given reassignment pass. See `reassignment_done()` for details.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "recovery_pending": { + "nullable": true, + "description": "If a recovery pass is ongoing, a snapshot of reassignment state when it started (which reflects what we'll be caught up to when it finishes)", + "allOf": [ + { + "$ref": "#/components/schemas/PendingRecovery" + } + ] + }, + "sagas_pending": { + "title": "IdOrdMap", + "description": "list of sagas we need to wait to complete before quiescing\n\nThese are basically running sagas. They may have been created in this Nexus process lifetime or created in another process and then recovered in this one.", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/PendingSagaInfo" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/PendingSagaInfo" + }, + "uniqueItems": true + } + }, + "required": [ + "first_recovery_complete", + "new_sagas_allowed", + "reassignment_generation", + "reassignment_pending", + "recovered_reassignment_generation", + "sagas_pending" + ] + }, + "SagaResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Saga" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SagaState": { + "oneOf": [ + { + "description": "Saga is currently executing", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "running" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Saga completed successfully", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "succeeded" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "One or more saga actions failed and the saga was successfully unwound (i.e., undo actions were executed for any actions that were completed). 
The saga is no longer running.", + "type": "object", + "properties": { + "error_info": { + "$ref": "#/components/schemas/SagaErrorInfo" + }, + "error_node_name": { + "$ref": "#/components/schemas/NodeName" + }, + "state": { + "type": "string", + "enum": [ + "failed" + ] + } + }, + "required": [ + "error_info", + "error_node_name", + "state" + ] + }, + { + "description": "One or more saga actions failed, *and* one or more undo actions failed during unwinding. State managed by the saga may now be inconsistent. Support may be required to repair the state. The saga is no longer running.", + "type": "object", + "properties": { + "error_info": { + "$ref": "#/components/schemas/SagaErrorInfo" + }, + "error_node_name": { + "$ref": "#/components/schemas/NodeName" + }, + "state": { + "type": "string", + "enum": [ + "stuck" + ] + }, + "undo_error_node_name": { + "$ref": "#/components/schemas/NodeName" + }, + "undo_source_error": {} + }, + "required": [ + "error_info", + "error_node_name", + "state", + "undo_error_node_name", + "undo_source_error" + ] + } + ] + }, + "SagasAllowed": { + "description": "Policy determining whether new sagas are allowed to be started\n\nThis is used by Nexus quiesce to disallow creation of new sagas when we're trying to quiesce Nexus.", + "oneOf": [ + { + "description": "New sagas may be started (normal condition)", + "type": "string", + "enum": [ + "allowed" + ] + }, + { + "description": "New sagas may not be started because we're quiescing or quiesced", + "type": "string", + "enum": [ + "disallowed_quiesce" + ] + }, + { + "description": "New sagas may not be started because we just started up and haven't determined if we're quiescing yet", + "type": "string", + "enum": [ + "disallowed_unknown" + ] + } + ] + }, + "ServerId": { + "description": "A unique ID for a Clickhouse Server", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "SledAgentUpdateStatus": { + "type": "object", + "properties": { + "host_phase_2": { + "$ref": "#/components/schemas/HostPhase2Status" + }, + "sled_id": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + }, + "zones": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/ZoneStatus" } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/ZoneStatus" + }, + "uniqueItems": true + } + }, + "required": [ + "host_phase_2", + "sled_id", + "zones" + ] + }, + "SledId": { + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + }, + "required": [ + "id" + ] + }, + "SledPolicy": { + "description": "The operator-defined policy of a sled.", + "oneOf": [ + { + "description": "The operator has indicated that the sled is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + }, + "provision_policy": { + "description": "Determines whether new resources can be provisioned onto the sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] } - } + }, + "required": [ + "kind", + "provision_policy" + ] }, - "4XX": { - "$ref": "#/components/responses/Error" + { + "description": "The operator has indicated that the sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is expunged, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new sled.)\n\nAn expunged sled is always non-provisionable.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "SledProvisionPolicy": { + "description": "The operator-defined provision policy of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", + "oneOf": [ + { + "description": "New resources will be provisioned on this sled.", + "type": "string", + "enum": [ + "provisionable" + ] }, - "5XX": { - "$ref": "#/components/responses/Error" + { + "description": "New resources will not be provisioned on this sled. However, if the sled is currently in service, existing resources will continue to be on this sled unless manually migrated off.", + "type": "string", + "enum": [ + "non_provisionable" + ] + } + ] + }, + "SledSelector": { + "type": "object", + "properties": { + "sled": { + "description": "ID of the sled", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "sled" + ] + }, + "SledState": { + "description": "The current state of the sled.", + "oneOf": [ + { + "description": "The sled is currently active, and has resources allocated on it.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "The sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is decommissioned, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new sled.)", + "type": "string", + "enum": [ + "decommissioned" + ] + } + ] + }, + "SourceNatConfig": { + "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", + "type": "object", + "properties": { + "first_port": { + "description": "The first port used for source NAT, inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "ip": { + "description": "The external address provided to the instance or service.", + "type": "string", + "format": "ip" + }, + "last_port": { + "description": "The last port used for source NAT, also inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "first_port", + "ip", + "last_port" + ] + }, + "SpStatus": { + "type": "object", + "properties": { + "slot0_version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "slot1_version": { + "$ref": "#/components/schemas/TufRepoVersion" + } + }, + "required": [ + "slot0_version", + "slot1_version" + ] + }, + "SpType": { + "description": "`SpType`\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"sled\", \"power\", \"switch\" ] } ```
", + "type": "string", + "enum": [ + "sled", + "power", + "switch" + ] + }, + "SupportBundleCreate": { + "type": "object", + "properties": { + "user_comment": { + "nullable": true, + "description": "User comment for the support bundle", + "type": "string" } } - } - } - }, - "components": { - "schemas": { - "Error": { - "description": "Error information from a response.", + }, + "SupportBundleInfo": { "type": "object", "properties": { - "error_code": { + "id": { + "type": "string", + "format": "uuid" + }, + "reason_for_creation": { "type": "string" }, - "message": { + "reason_for_failure": { + "nullable": true, "type": "string" }, - "request_id": { + "state": { + "$ref": "#/components/schemas/SupportBundleState" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "user_comment": { + "nullable": true, "type": "string" } }, "required": [ - "message", - "request_id" + "id", + "reason_for_creation", + "state", + "time_created" ] }, - "Ping": { + "SupportBundleInfoResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "status": { - "description": "Whether the external API is reachable. Will always be Ok if the endpoint returns anything at all.", - "allOf": [ - { - "$ref": "#/components/schemas/PingStatus" + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SupportBundleInfo" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SupportBundleState": { + "oneOf": [ + { + "description": "Support Bundle still actively being collected.\n\nThis is the initial state for a Support Bundle, and it will automatically transition to either \"Failing\" or \"Active\".\n\nIf a user no longer wants to access a Support Bundle, they can request cancellation, which will transition to the \"Destroying\" state.", + "type": "string", + "enum": [ + "collecting" + ] + }, + { + "description": "Support Bundle is being destroyed.\n\nOnce backing storage has been freed, this bundle is destroyed.", + "type": "string", + "enum": [ + "destroying" + ] + }, + { + "description": "Support Bundle was not created successfully, or was created and has lost backing storage.\n\nThe record of the bundle still exists for readability, but the only valid operation on these bundles is to destroy them.", + "type": "string", + "enum": [ + "failed" + ] + }, + { + "description": "Support Bundle has been processed, and is ready for usage.", + "type": "string", + "enum": [ + "active" + ] + } + ] + }, + "SupportBundleUpdate": { + "type": "object", + "properties": { + "user_comment": { + "nullable": true, + "description": "User comment for the support bundle", + "type": "string" + } + } + }, + "TufRepoVersion": { + "oneOf": [ + { + "type": "object", + "properties": { + "zone_status_version": { + "type": "string", + "enum": [ + "unknown" + ] + } + }, + "required": [ + "zone_status_version" + ] + }, + { + "type": "object", + "properties": { + "zone_status_version": { + "type": "string", + "enum": [ + "install_dataset" + ] + } + }, + "required": [ + "zone_status_version" + ] + }, + { + "type": "object", + "properties": { + "details": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + }, + 
"zone_status_version": { + "type": "string", + "enum": [ + "version" + ] + } + }, + "required": [ + "details", + "zone_status_version" + ] + }, + { + "type": "object", + "properties": { + "details": { + "type": "string" + }, + "zone_status_version": { + "type": "string", + "enum": [ + "error" + ] } + }, + "required": [ + "details", + "zone_status_version" ] } + ] + }, + "TypedUuidForBlueprintKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForDatasetKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForDemoSagaKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForExternalIpKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForMupdateOverrideKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForOmicronZoneKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForPhysicalDiskKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForSledKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForZpoolKind": { + "type": "string", + "format": "uuid" + }, + "UninitializedSled": { + "description": "A sled that has not been added to an initialized rack yet", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "cubby": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "rack_id": { + "type": "string", + "format": "uuid" + } }, "required": [ - "status" + "baseboard", + "cubby", + "rack_id" ] }, - "PingStatus": { + "UninitializedSledId": { + "description": "The unique hardware ID for a sled", + "type": "object", + "properties": { + "part": { + "type": "string" + }, + "serial": { + "type": "string" + } + }, + "required": [ + "part", + "serial" + ] + }, + "UninitializedSledResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/UninitializedSled" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "UpdateAttemptStatus": { + "description": "status of a single update attempt", "type": "string", "enum": [ - "ok" + "not_started", + "fetching_artifact", + "precheck", + "updating", + "update_waiting", + "post_update", + "post_update_wait", + "done" + ] + }, + "UpdateCompletedHow": { + "type": "string", + "enum": [ + "found_no_changes_needed", + "completed_update", + "waited_for_concurrent_update", + "took_over_concurrent_update" + ] + }, + "UpdateStatus": { + "type": "object", + "properties": { + "mgs_driven": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/MgsDrivenUpdateStatus" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/MgsDrivenUpdateStatus" + }, + "uniqueItems": true + }, + "sleds": { + "title": "IdOrdMap", + "x-rust-type": { + "crate": "iddqd", + "parameters": [ + { + "$ref": "#/components/schemas/SledAgentUpdateStatus" + } + ], + "path": "iddqd::IdOrdMap", + "version": "*" + }, + "type": "array", + "items": { + "$ref": "#/components/schemas/SledAgentUpdateStatus" + }, + "uniqueItems": true + } + }, + "required": [ + "mgs_driven", + "sleds" + ] + }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + 
"WaitingStatus": { + "description": "externally-exposed status for waiting updates", + "type": "object", + "properties": { + "baseboard_id": { + "$ref": "#/components/schemas/BaseboardId" + }, + "nattempts_done": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "next_attempt_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "baseboard_id", + "nattempts_done", + "next_attempt_time" + ] + }, + "ZoneAddWaitingOn": { + "oneOf": [ + { + "description": "Waiting on one or more blockers (typically MUPdate-related reasons) to clear.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "blockers" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "ZoneStatus": { + "type": "object", + "properties": { + "version": { + "$ref": "#/components/schemas/TufRepoVersion" + }, + "zone_id": { + "$ref": "#/components/schemas/TypedUuidForOmicronZoneKind" + }, + "zone_type": { + "$ref": "#/components/schemas/OmicronZoneType" + } + }, + "required": [ + "version", + "zone_id", + "zone_type" + ] + }, + "ZoneUnsafeToShutdown": { + "description": "Zones which should not be shut down, because their lack of availability could be problematic for the successful functioning of the deployed system.", + "oneOf": [ + { + "type": "object", + "properties": { + "reason": { + "$ref": "#/components/schemas/CockroachdbUnsafeToShutdown" + }, + "type": { + "type": "string", + "enum": [ + "cockroachdb" + ] + } + }, + "required": [ + "reason", + "type" + ] + }, + { + "type": "object", + "properties": { + "synchronized_count": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "total_boundary_ntp_zones": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "boundary_ntp" + ] + } + }, + "required": [ + "synchronized_count", + "total_boundary_ntp_zones", + "type" + ] + }, + { + "type": "object", + "properties": { + "synchronized_count": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "total_internal_dns_zones": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + }, + "required": [ + "synchronized_count", + "total_internal_dns_zones", + "type" + ] + } + ] + }, + "ZoneUpdatesWaitingOn": { + "oneOf": [ + { + "description": "Waiting on discretionary zone placement.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "discretionary_zones" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting on updates to RoT / SP / Host OS / etc.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "pending_mgs_updates" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting on the same set of blockers zone adds are waiting on.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zone_add_blockers" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "ZoneWaitingToExpunge": { + "description": "Out-of-date zones which are not yet ready to be expunged.\n\nFor example, out-of-date Nexus zones should not be expunged until handoff has completed.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "nexus" + ] + }, + "zone_generation": { + "$ref": "#/components/schemas/Generation" + } + }, + "required": [ + "type", + "zone_generation" + ] + } + ] + }, + "ZpoolName": { + "title": "The name of a Zpool", + "description": 
"Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", + "type": "string", + "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + }, + "IdSortMode": { + "description": "Supported set of sort modes for scanning by id only.\n\nCurrently, we only support scanning in ascending order.", + "oneOf": [ + { + "description": "sort in increasing order of \"id\"", + "type": "string", + "enum": [ + "id_ascending" + ] + } ] + }, + "TimeAndIdSortMode": { + "description": "Supported set of sort modes for scanning by timestamp and ID", + "oneOf": [ + { + "description": "sort in increasing order of timestamp and ID, i.e., earliest first", + "type": "string", + "enum": [ + "time_and_id_ascending" + ] + }, + { + "description": "sort in increasing order of timestamp and ID, i.e., most recent first", + "type": "string", + "enum": [ + "time_and_id_descending" + ] + } + ] + }, + "TypedUuidForInstanceKind": { + "type": "string", + "format": "uuid" } }, "responses": { From e470fb888da2c79452da25dbb3db2f0bf524e373 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Wed, 24 Sep 2025 10:13:25 -0400 Subject: [PATCH 08/18] Complete `target-release` test simulating an entire update (#9059) Finishes the `target-release` `reconfigurator-test`, showing the simulate update walking through the process of starting new Nexus zones, waiting for handoff, then expunging the old Nexus zones. Has two tweaks: * Fixes a planning report off-by-one bug where we'd claim a zone was both out of date and expunged (or updated) within the same plan. * Adds a `set active-nexus-gen N` command to `reconfigurator-cli` to control Nexus handoff instead of assuming it completes instantly. Closes #8478 --------- Co-authored-by: Sean Klein --- dev-tools/reconfigurator-cli/src/lib.rs | 25 +- .../tests/input/cmds-target-release.txt | 38 +- .../tests/output/cmds-stdout | 3 + .../tests/output/cmds-target-release-stdout | 1062 ++++++++++++++++- nexus/reconfigurator/simulation/src/config.rs | 103 +- nexus/reconfigurator/simulation/src/state.rs | 3 + nexus/types/src/deployment/planning_report.rs | 12 + 7 files changed, 1176 insertions(+), 70 deletions(-) diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index 175863913df..350ebbd6793 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -150,6 +150,7 @@ impl ReconfiguratorSim { builder.set_internal_dns_version(parent_blueprint.internal_dns_version); builder.set_external_dns_version(parent_blueprint.external_dns_version); + let active_nexus_gen = state.config().active_nexus_zone_generation(); let mut active_nexus_zones = BTreeSet::new(); let mut not_yet_nexus_zones = BTreeSet::new(); @@ -177,13 +178,9 @@ impl ReconfiguratorSim { match &zone.zone_type { nexus_types::deployment::BlueprintZoneType::Nexus(nexus) => { - if nexus.nexus_generation - == parent_blueprint.nexus_generation - { + if nexus.nexus_generation == active_nexus_gen { active_nexus_zones.insert(zone.id); - } else if nexus.nexus_generation - > parent_blueprint.nexus_generation - { + } else if nexus.nexus_generation > active_nexus_gen { not_yet_nexus_zones.insert(zone.id); } } @@ -191,6 +188,13 @@ impl ReconfiguratorSim { } } + if active_nexus_zones.is_empty() { + bail!( + "no Nexus zones found at current active generation \ + ({active_nexus_gen})" + ); + } + builder.set_active_nexus_zones(active_nexus_zones); builder.set_not_yet_nexus_zones(not_yet_nexus_zones); @@ 
-1185,6 +1189,9 @@ enum SetArgs { Seed { seed: String }, /// target number of Nexus instances (for planning) NumNexus { num_nexus: u16 }, + /// specify the generation of Nexus zones that are considered active when + /// running the blueprint planner + ActiveNexusGen { gen: Generation }, /// system's external DNS zone name (suffix) ExternalDnsZoneName { zone_name: String }, /// system target release @@ -2736,6 +2743,12 @@ fn cmd_set( .target_nexus_zone_count(usize::from(num_nexus)); rv } + SetArgs::ActiveNexusGen { gen } => { + let rv = + format!("will use active Nexus zones from generation {gen}"); + state.config_mut().set_active_nexus_zone_generation(gen); + rv + } SetArgs::ExternalDnsZoneName { zone_name } => { let rv = format!( "external DNS zone name: {:?} -> {:?}", diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt index 3d0d1d0c382..eff1ed4a5de 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt @@ -446,26 +446,50 @@ blueprint-diff latest sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest inventory-generate -# Add Nexus zones on three sleds +# Start the Nexus handoff process: A planning step here should place three new +# Nexus zones, each running from the new TUF repo artifact. blueprint-plan latest latest blueprint-diff latest -# Propagate configs to the sleds which should be running Nexus +# Update inventory to reflect the three Nexus zones starting. sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest inventory-generate -# Update the Nexus generation from 1 -> 2, initiating -# quiesce of the old Nexuses +# Planning now should bump the top-level `nexus_generation` to 2, indicating +# that we want handoff to begin. blueprint-plan latest latest blueprint-diff latest -# Expunge three Nexus zones, one at a time +# Planning again should make no changes: we haven't yet performed a handoff, +# so the old Nexus zones are still in charge. blueprint-plan latest latest +blueprint-diff latest + +# Bump the set of active Nexus zones; this simulates Nexus handoff. +set active-nexus-gen 2 + +# Planning should now expunge one of the old Nexus zones. blueprint-plan latest latest +blueprint-diff latest + +# Two more planning iterations should expunge the remaining two old install +# dataset Nexus zones. +blueprint-plan latest latest +blueprint-diff latest blueprint-plan latest latest +blueprint-diff latest -# Attempt to plan one more blueprint. -# There should be no changes attempted here. +# Update sled configs and inventory to reflect the expunged Nexus zones. +sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +inventory-generate + +# Planning now should note that the old zones are gone, but do nothing else: +# the update is complete. All in-service zones have image sources from the new +# TUF repo, and all zones using old images are expunged. 
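The lib.rs hunk above drives the handoff simulation: each Nexus zone's `nexus_generation` is compared against the configured active generation, with matching zones treated as active, newer ones as not-yet-active, and an error raised if no zone matches the active generation. The following is a minimal sketch of that classification only, using simplified stand-in types (`ZoneId`, `Generation` as plain integers) and a hypothetical `classify_nexus_zones` helper rather than the real planner-input builder:

```rust
use std::collections::BTreeSet;

// Stand-ins for the real typed zone UUID and Generation types.
type ZoneId = u64;
type Generation = u64;

/// Split Nexus zones into "active" (generation == active_gen) and
/// "not yet active" (generation > active_gen), mirroring the hunk above.
/// Returns an error if no zone is at the active generation, analogous to
/// the `bail!` added in the diff.
fn classify_nexus_zones(
    zones: &[(ZoneId, Generation)],
    active_gen: Generation,
) -> Result<(BTreeSet<ZoneId>, BTreeSet<ZoneId>), String> {
    let mut active = BTreeSet::new();
    let mut not_yet = BTreeSet::new();
    for &(id, zone_gen) in zones {
        if zone_gen == active_gen {
            active.insert(id);
        } else if zone_gen > active_gen {
            not_yet.insert(id);
        }
        // Zones with an older generation land in neither set.
    }
    if active.is_empty() {
        return Err(format!(
            "no Nexus zones found at current active generation ({active_gen})"
        ));
    }
    Ok((active, not_yet))
}

fn main() {
    // Three old zones at generation 1 and three new ones at generation 2,
    // as in the target-release test scenario.
    let zones: &[(ZoneId, Generation)] =
        &[(1, 1), (2, 1), (3, 1), (4, 2), (5, 2), (6, 2)];
    let (active, not_yet) = classify_nexus_zones(zones, 1).unwrap();
    assert_eq!(active.len(), 3);
    assert_eq!(not_yet.len(), 3);
}
```

With `set active-nexus-gen 2`, the same classification would put the generation-2 zones in the active set and leave the generation-1 zones eligible for expungement; if no zone matched the configured generation, the helper would return the error corresponding to the new bail-out in the diff.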
blueprint-plan latest latest +blueprint-diff latest +blueprint-show latest diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-stdout index 9fa0f4469bc..d928df99716 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-stdout @@ -863,6 +863,8 @@ wiped system > load state.json loaded data from "state.json" +warnings: + could not determine active Nexus generation from serialized state: no target blueprint set (using default of 1) result: system: using collection 6e066695-94bc-4250-bd63-fd799c166cc1 as source of sled inventory data @@ -875,6 +877,7 @@ result: config: configured external DNS zone name: oxide.example configured silo names: example-silo + active Nexus generation: 1 > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index b89bbc53fb2..a3e75705373 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -1032,6 +1032,7 @@ result: config: configured external DNS zone name: oxide.example configured silo names: example-silo + active Nexus generation: 1 > # This should show the target release. @@ -2961,7 +2962,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (clickhouse) -* 25 remaining out-of-date zones +* 24 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3095,7 +3096,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 62620961-fc4a-481e-968b-f5acbac0dc63 (internal_ntp) -* 24 remaining out-of-date zones +* 23 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3369,7 +3370,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (external_dns) -* 23 remaining out-of-date zones +* 22 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3670,7 +3671,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 86a22a56-0168-453d-9df1-cb2a7c64b5d3 (crucible) -* 22 remaining out-of-date zones +* 21 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -3799,7 +3800,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 99e2f30b-3174-40bf-a78a-90da8abba8ca (internal_dns) -* 21 remaining out-of-date zones +* 
20 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4106,7 +4107,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (crucible_pantry) -* 20 remaining out-of-date zones +* 19 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4389,7 +4390,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone bd354eef-d8a6-4165-9124-283fb5e46d77 (crucible) -* 19 remaining out-of-date zones +* 18 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4523,7 +4524,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone e2fdefe7-95b2-4fd2-ae37-56929a06d58c (crucible) -* 18 remaining out-of-date zones +* 17 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4660,7 +4661,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 058fd5f9-60a8-4e11-9302-15172782e17d (crucible) -* 17 remaining out-of-date zones +* 16 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -4782,7 +4783,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 427ec88f-f467-42fa-9bbb-66a91a36103c (internal_dns) -* 16 remaining out-of-date zones +* 15 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5067,7 +5068,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 5199c033-4cf9-4ab6-8ae7-566bd7606363 (crucible) -* 15 remaining out-of-date zones +* 14 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5191,7 +5192,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 6444f8a5-6465-4f0b-a549-1993c113569c (internal_ntp) -* 14 remaining out-of-date zones +* 13 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5459,7 +5460,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 
803bfb63-c246-41db-b0da-d3b87ddfc63d (external_dns) -* 13 remaining out-of-date zones +* 12 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5754,7 +5755,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (crucible_pantry) -* 12 remaining out-of-date zones +* 11 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6031,7 +6032,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone dfac80b4-a887-430a-ae87-a4e065dba787 (crucible) -* 11 remaining out-of-date zones +* 10 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6165,7 +6166,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 694bd14f-cb24-4be4-bb19-876e79cda2c8 (crucible) -* 10 remaining out-of-date zones +* 9 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6287,7 +6288,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (crucible_pantry) -* 9 remaining out-of-date zones +* 8 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6548,7 +6549,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 7c252b64-c5af-4ec1-989e-9a03f3b0f111 (crucible) -* 8 remaining out-of-date zones +* 7 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6671,7 +6672,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (internal_dns) -* 7 remaining out-of-date zones +* 6 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -6954,7 +6955,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (internal_ntp) -* 6 remaining out-of-date zones +* 5 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -7226,7 +7227,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 
1 out-of-date zone updated in-place: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f55647d4-5500-4ad3-893a-df45bd50d622 (crucible) -* 5 remaining out-of-date zones +* 4 remaining out-of-date zones * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -7354,7 +7355,7 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts * 1 out-of-date zone expunged: * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (external_dns) -* 4 remaining out-of-date zones +* 3 remaining out-of-date zones * waiting to update top-level nexus_generation: new Nexus zones have not been planned yet @@ -7629,7 +7630,8 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest bluepri generated inventory collection c0548a65-0ee4-4876-a5e0-3384187c88cc from configured sleds -> # Add Nexus zones on three sleds +> # Start the Nexus handoff process: A planning step here should place three new +> # Nexus zones, each running from the new TUF repo artifact. > blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -7937,7 +7939,7 @@ external DNS: -> # Propagate configs to the sleds which should be running Nexus +> # Update inventory to reflect the three Nexus zones starting. > sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (d69e1109-06be-4469-8876-4292dc7885d7) @@ -7951,8 +7953,8 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest bluepri generated inventory collection 7349431d-718a-4353-a1ee-357ce2aeeb28 from configured sleds -> # Update the Nexus generation from 1 -> 2, initiating -> # quiesce of the old Nexuses +> # Planning now should bump the top-level `nexus_generation` to 2, indicating +> # that we want handoff to begin. > blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 10, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -8009,7 +8011,8 @@ external DNS: -> # Expunge three Nexus zones, one at a time +> # Planning again should make no changes: we haven't yet performed a handoff, +> # so the old Nexus zones are still in charge. 
> blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 10, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -8027,14 +8030,52 @@ planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 10 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts -* 1 out-of-date zone expunged: - * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus) * 3 remaining out-of-date zones +* 3 zones waiting to be expunged: + * zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus): image out-of-date, but zone's nexus_generation 1 is still active + * zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus): image out-of-date, but zone's nexus_generation 1 is still active + * zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus): image out-of-date, but zone's nexus_generation 1 is still active + + + +> blueprint-diff latest +from: blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 +to: blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 2 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 54 (records: 71) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 12) + + +> # Bump the set of active Nexus zones; this simulates Nexus handoff. +> set active-nexus-gen 2 +will use active Nexus zones from generation 2 + +> # Planning should now expunge one of the old Nexus zones. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 10, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -8047,19 +8088,154 @@ INFO ran out of boards for MGS-driven update generated blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 based on parent blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 blueprint source: planner with report: planning report: -* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 10 zones are already from artifacts * skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 1 out-of-date zone expunged: - * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus) + * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus) * 2 remaining out-of-date zones +> blueprint-diff latest +from: blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 +to: blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 17 -> 18): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 
78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_156bfcde-e3fa-4abe-a93e-eb4a408b4e5e 7f2ba73c-1f57-4f92-8059-738059808061 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_9ae90740-7fdb-4073-ae43-048f2fca3d69 91f7f766-931f-43ef-99b4-2399f58c946b in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + 
oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 +* oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 1.0.0 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 1.0.0 in service fd00:1122:3344:102::27 + crucible_pantry 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 9ae90740-7fdb-4073-ae43-048f2fca3d69 artifact: version 1.0.0 in service fd00:1122:3344:102::2c +* nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 - in service fd00:1122:3344:102::22 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 2 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) +- AAAA fd00:1122:3344:102::22 +* name: _nexus._tcp (records: 6 -> 5) +- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal +- SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal +- SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal +- SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal ++ SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal ++ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12221 
90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal ++ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal ++ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal + unchanged names: 52 (records: 64) + +external DNS: +* DNS zone: "oxide.example": +* name: example-silo.sys (records: 6 -> 5) +- A 192.0.2.2 +- A 192.0.2.7 +- A 192.0.2.3 +- A 192.0.2.6 +- A 192.0.2.4 +- A 192.0.2.5 ++ A 192.0.2.7 ++ A 192.0.2.3 ++ A 192.0.2.6 ++ A 192.0.2.4 ++ A 192.0.2.5 + unchanged names: 4 (records: 6) + + + + +> # Two more planning iterations should expunge the remaining two old install +> # dataset Nexus zones. > blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -8071,34 +8247,812 @@ generated blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf based on parent bluepri blueprint source: planner with report: planning report: * skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts -* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 9 zones are already from artifacts * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 1 out-of-date zone expunged: - * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus) + * sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus) * 1 remaining out-of-date zone +> blueprint-diff latest +from: blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 +to: blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 16 -> 17): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial 
disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_7e83b92d-6b02-47f3-a5ab-125a6bb44e29 7562b5c7-041c-44e8-9ae7-e453b63f5b03 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off + 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_d516d61b-fd96-46ad-a743-78eec814ee90 d2707e9c-d793-4a6c-8f77-2d3aa5a98390 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 +* oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 1.0.0 in service fd00:1122:3344:101::26 + crucible_pantry 7e83b92d-6b02-47f3-a5ab-125a6bb44e29 artifact: version 1.0.0 in service fd00:1122:3344:101::2a + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::24 + external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus d516d61b-fd96-46ad-a743-78eec814ee90 artifact: version 1.0.0 in service fd00:1122:3344:101::2b +* nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 - in service fd00:1122:3344:101::22 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 2 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": +- name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) +- AAAA fd00:1122:3344:101::22 +* name: _nexus._tcp (records: 5 -> 4) +- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12221 
3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal +- SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal +- SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal ++ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal ++ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal ++ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal + unchanged names: 51 (records: 63) + +external DNS: +* DNS zone: "oxide.example": +* name: example-silo.sys (records: 5 -> 4) +- A 192.0.2.7 +- A 192.0.2.3 +- A 192.0.2.6 +- A 192.0.2.4 +- A 192.0.2.5 ++ A 192.0.2.7 ++ A 192.0.2.6 ++ A 192.0.2.4 ++ A 192.0.2.5 + unchanged names: 4 (records: 6) + + + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b based on parent blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf +blueprint source: planner with report: +planning report: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts +* 1 out-of-date zone expunged: + * sled d81c6a84-79b8-4958-ae41-ea46c9b19763, zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus) +* 0 remaining out-of-date zones + + + +> blueprint-diff latest +from: blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf +to: blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 16 -> 17): + + host phase 2 contents: + 
------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 0f0c883a-7a84-4064-b085-0af035edfb3d in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 bdadb3c9-f786-4fcd-a632-667a2e359065 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f expunged none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad ecb27354-877c-4a24-8982-1298a222ca1c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 +* oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 - in service none none off + └─ + expunged + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns 63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 artifact: version 1.0.0 in service fd00:1122:3344:103::2a + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::23 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::21 + nexus 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad artifact: version 1.0.0 in service fd00:1122:3344:103::2b +* nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 - in service fd00:1122:3344:103::22 + └─ + expunged ⏳ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 2 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: 
+* DNS zone: "control-plane.oxide.internal": +- name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) +- AAAA fd00:1122:3344:103::22 +* name: _nexus._tcp (records: 4 -> 3) +- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal +- SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal +- SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal ++ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal ++ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal ++ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal + unchanged names: 50 (records: 62) + +external DNS: +* DNS zone: "oxide.example": +* name: example-silo.sys (records: 4 -> 3) +- A 192.0.2.7 +- A 192.0.2.6 +- A 192.0.2.4 +- A 192.0.2.5 ++ A 192.0.2.7 ++ A 192.0.2.6 ++ A 192.0.2.5 + unchanged names: 4 (records: 6) + + + + +> # Update sled configs and inventory to reflect the expunged Nexus zones. +> sled-set 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron-config latest +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c omicron config from latest blueprint (e31c9054-8549-4c68-acf9-a01f68d1fc9b) + +> sled-set 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron-config latest +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 omicron config from latest blueprint (e31c9054-8549-4c68-acf9-a01f68d1fc9b) + +> sled-set d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron-config latest +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 omicron config from latest blueprint (e31c9054-8549-4c68-acf9-a01f68d1fc9b) + +> inventory-generate +generated inventory collection 5ffc4b23-0e6e-40e9-bacd-f83ca33f9792 from configured sleds + + +> # Planning now should note that the old zones are gone, but do nothing else: +> # the update is complete. All in-service zones have image sources from the new +> # TUF repo, and all zones using old images are expunged. 
+> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 +INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 +INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 +INFO ran out of boards for MGS-driven update +generated blueprint 3e332949-6785-4aff-ad86-6134f5ce6152 based on parent blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b +blueprint source: planner with report: +planning report: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts + + + +> blueprint-diff latest +from: blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b +to: blueprint 3e332949-6785-4aff-ad86-6134f5ce6152 + + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 18): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 
2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_156bfcde-e3fa-4abe-a93e-eb4a408b4e5e 7f2ba73c-1f57-4f92-8059-738059808061 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 expunged none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_9ae90740-7fdb-4073-ae43-048f2fca3d69 91f7f766-931f-43ef-99b4-2399f58c946b in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 
484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 1.0.0 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 1.0.0 in service fd00:1122:3344:102::27 + crucible_pantry 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 9ae90740-7fdb-4073-ae43-048f2fca3d69 artifact: version 1.0.0 in service fd00:1122:3344:102::2c +* nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:102::22 + └─ + expunged ✓ + + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 17): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 
7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_7e83b92d-6b02-47f3-a5ab-125a6bb44e29 7562b5c7-041c-44e8-9ae7-e453b63f5b03 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c expunged none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_d516d61b-fd96-46ad-a743-78eec814ee90 d2707e9c-d793-4a6c-8f77-2d3aa5a98390 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + 
oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + -------------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 1.0.0 in service fd00:1122:3344:101::26 + crucible_pantry 7e83b92d-6b02-47f3-a5ab-125a6bb44e29 artifact: version 1.0.0 in service fd00:1122:3344:101::2a + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::24 + external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus d516d61b-fd96-46ad-a743-78eec814ee90 artifact: version 1.0.0 in service fd00:1122:3344:101::2b +* nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:101::22 + └─ + expunged ✓ + + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 17): + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 0f0c883a-7a84-4064-b085-0af035edfb3d in service none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 bdadb3c9-f786-4fcd-a632-667a2e359065 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 expunged none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad ecb27354-877c-4a24-8982-1298a222ca1c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + -------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + 
-------------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns 63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 artifact: version 1.0.0 in service fd00:1122:3344:103::2a + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::23 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::21 + nexus 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad artifact: version 1.0.0 in service fd00:1122:3344:103::2b +* nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 - expunged ⏳ fd00:1122:3344:103::22 + └─ + expunged ✓ + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 2 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + +> blueprint-show latest +blueprint 3e332949-6785-4aff-ad86-6134f5ce6152 +parent: e31c9054-8549-4c68-acf9-a01f68d1fc9b + + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 18) + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 
2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 22a4acd6-9d38-43e2-a3bf-c85f5c2f3246 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns bccbe44f-bcf8-4868-b086-c8901f896cdc in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_156bfcde-e3fa-4abe-a93e-eb4a408b4e5e 7f2ba73c-1f57-4f92-8059-738059808061 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_4ab0ec67-b27e-42b5-af22-9117ad11113b 75ffc8e6-b071-4f51-966d-4a6e6b01b432 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_698d1d82-0620-4978-93ac-0ba5d40f3da9 dfe5586b-e4a8-4b98-ad72-eabc34988177 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 expunged none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_9ae90740-7fdb-4073-ae43-048f2fca3d69 91f7f766-931f-43ef-99b4-2399f58c946b in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 expunged none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_ba87399e-e9b7-4ee4-8cb7-0032822630e9 
484f151e-c290-48bd-99b2-c97ef85c9844 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------ + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------ + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 artifact: version 1.0.0 in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 artifact: version 1.0.0 in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 artifact: version 1.0.0 in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c artifact: version 1.0.0 in service fd00:1122:3344:102::27 + crucible_pantry 156bfcde-e3fa-4abe-a93e-eb4a408b4e5e artifact: version 1.0.0 in service fd00:1122:3344:102::2b + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::25 + external_dns 4ab0ec67-b27e-42b5-af22-9117ad11113b artifact: version 1.0.0 in service fd00:1122:3344:102::2a + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::24 + internal_dns 698d1d82-0620-4978-93ac-0ba5d40f3da9 artifact: version 1.0.0 in service fd00:1122:3344:1::1 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca artifact: version 0.0.1 expunged ✓ fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::21 + internal_ntp ba87399e-e9b7-4ee4-8cb7-0032822630e9 artifact: version 1.0.0 in service fd00:1122:3344:102::29 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec artifact: version 0.0.1 expunged ✓ fd00:1122:3344:102::22 + nexus 9ae90740-7fdb-4073-ae43-048f2fca3d69 artifact: version 1.0.0 in service fd00:1122:3344:102::2c + + + + sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 17) + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 
7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns d7f95803-df03-49e0-9ad2-37de73f01417 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns 68e4149e-114c-460e-8f33-54dd5f9a274e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_7e83b92d-6b02-47f3-a5ab-125a6bb44e29 7562b5c7-041c-44e8-9ae7-e453b63f5b03 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_26bdd109-c842-43a9-95cb-15aba9b0832b 16a8c618-d062-4bde-8ca4-301b5f14ccf2 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_abd27551-4027-4084-8b52-13a575b035b4 6119babc-cdc9-4b6b-afa4-9037eee05728 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c expunged none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_d516d61b-fd96-46ad-a743-78eec814ee90 d2707e9c-d793-4a6c-8f77-2d3aa5a98390 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 expunged none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_e14f91b0-0c41-48a0-919d-e5078d2b89b0 312286f1-e378-464d-97cb-6fa06ba2dab7 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + 
oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------ + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------ + crucible 058fd5f9-60a8-4e11-9302-15172782e17d artifact: version 1.0.0 in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 artifact: version 1.0.0 in service fd00:1122:3344:101::26 + crucible_pantry 7e83b92d-6b02-47f3-a5ab-125a6bb44e29 artifact: version 1.0.0 in service fd00:1122:3344:101::2a + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::24 + external_dns 26bdd109-c842-43a9-95cb-15aba9b0832b artifact: version 1.0.0 in service fd00:1122:3344:101::29 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:2::1 + internal_dns abd27551-4027-4084-8b52-13a575b035b4 artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::21 + internal_ntp e14f91b0-0c41-48a0-919d-e5078d2b89b0 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:101::22 + nexus d516d61b-fd96-46ad-a743-78eec814ee90 artifact: version 1.0.0 in service fd00:1122:3344:101::2b + + + + sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 17) + + host phase 2 contents: + ------------------------------ + slot boot image source + ------------------------------ + A current contents + B artifact: version 1.0.0 + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 0f0c883a-7a84-4064-b085-0af035edfb3d in service none none off + 
oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 4d674b3e-8209-4323-b7bf-e666028a9a04 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_9464c6ed-ffa6-4e88-ae4e-76551d82b2af e86213aa-79a3-4beb-a886-3441401a8519 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 bdadb3c9-f786-4fcd-a632-667a2e359065 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_d5fd048a-8786-42d3-938e-820eae95d7f4 c22b409e-662b-48a9-ac70-29f7487aa6b1 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 expunged none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad ecb27354-877c-4a24-8982-1298a222ca1c in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_9e2e0774-3cf6-4f75-9a12-92db05c77b81 125cb821-0867-4291-af2c-3756ac3cfc6d in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 expunged none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------ + zone type zone id image source disposition underlay IP + 
------------------------------------------------------------------------------------------------------------------------ + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 artifact: version 1.0.0 in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 artifact: version 1.0.0 in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 1.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 9464c6ed-ffa6-4e88-ae4e-76551d82b2af artifact: version 1.0.0 in service fd00:1122:3344:103::28 + external_dns 63ff80e3-ef8d-4186-8240-5ebd3f7d4d82 artifact: version 1.0.0 in service fd00:1122:3344:103::2a + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::23 + internal_dns d5fd048a-8786-42d3-938e-820eae95d7f4 artifact: version 1.0.0 in service fd00:1122:3344:3::1 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:3::1 + internal_ntp 9e2e0774-3cf6-4f75-9a12-92db05c77b81 artifact: version 1.0.0 in service fd00:1122:3344:103::29 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 0.0.1 expunged ✓ fd00:1122:3344:103::22 + nexus 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad artifact: version 1.0.0 in service fd00:1122:3344:103::2b + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: reconfigurator-sim + created at::::::::::::: + comment:::::::::::::::: (none) + internal DNS version::: 1 + external DNS version::: 1 + target release min gen: 1 + nexus gen:::::::::::::: 2 + + PENDING MGS-MANAGED UPDATES: 0 + +blueprint source: planner with report: +planning report: +* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts +* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts +* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts -> # Attempt to plan one more blueprint. -> # There should be no changes attempted here. 
-> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 9, num_eligible: 0, num_ineligible: 0 -INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 -INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 8, num_eligible: 0, num_ineligible: 0 -INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 -INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 -INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 -INFO ran out of boards for MGS-driven update -generated blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b based on parent blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf -blueprint source: planner with report: -planning report: -* skipping noop zone image source check on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: all 9 zones are already from artifacts -* skipping noop zone image source check on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: all 8 zones are already from artifacts -* skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 8 zones are already from artifacts diff --git a/nexus/reconfigurator/simulation/src/config.rs b/nexus/reconfigurator/simulation/src/config.rs index 39ff02f5bf1..80fc80fe2b0 100644 --- a/nexus/reconfigurator/simulation/src/config.rs +++ b/nexus/reconfigurator/simulation/src/config.rs @@ -2,10 +2,12 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use std::fmt; +use std::{collections::BTreeSet, fmt}; use indexmap::IndexSet; -use omicron_common::api::external::Name; +use nexus_types::deployment::{Blueprint, BlueprintTarget}; +use omicron_common::api::external::{Generation, Name}; +use omicron_uuid_kinds::OmicronZoneUuid; use crate::{ LoadSerializedResultBuilder, @@ -37,6 +39,10 @@ pub struct SimConfig { /// We can likely make this better after addressing /// . num_nexus: Option, + + /// The Nexus generation to treat as the active set for the purposes of + /// simulating handoff between updates. 
+ active_nexus_zone_generation: Generation, } impl SimConfig { @@ -48,6 +54,7 @@ impl SimConfig { .collect(), external_dns_zone_name: String::from("oxide.example"), num_nexus: None, + active_nexus_zone_generation: Generation::new(), } } @@ -66,6 +73,11 @@ impl SimConfig { self.num_nexus } + #[inline] + pub fn active_nexus_zone_generation(&self) -> Generation { + self.active_nexus_zone_generation + } + pub(crate) fn to_mut(&self) -> SimConfigBuilder { SimConfigBuilder { inner: SimConfigBuilderInner { config: self.clone() }, @@ -110,11 +122,17 @@ impl SimConfigBuilder { &mut self, external_dns_zone_names: Vec<String>, silo_names: Vec<Name>, + active_nexus_zones: &BTreeSet<OmicronZoneUuid>, + target_blueprint: Option<&BlueprintTarget>, + all_blueprints: &[Blueprint], res: &mut LoadSerializedResultBuilder, ) -> LoadSerializedConfigResult { self.inner.load_serialized_inner( external_dns_zone_names, silo_names, + active_nexus_zones, + target_blueprint, + all_blueprints, res, ) } @@ -141,6 +159,11 @@ impl SimConfigBuilder { self.log.push(SimConfigLogEntry::SetNumNexus(num_nexus)); } + pub fn set_active_nexus_zone_generation(&mut self, gen: Generation) { + self.inner.set_active_nexus_zone_generation(gen); + self.log.push(SimConfigLogEntry::SetActiveNexusZoneGeneration(gen)); + } + pub fn wipe(&mut self) { self.inner.wipe_inner(); self.log.push(SimConfigLogEntry::Wipe); @@ -159,6 +182,7 @@ pub enum SimConfigLogEntry { SetSiloNames(IndexSet<Name>), SetExternalDnsZoneName(String), SetNumNexus(u16), + SetActiveNexusZoneGeneration(Generation), Wipe, } @@ -171,6 +195,9 @@ pub struct LoadSerializedConfigResult { /// The silo names loaded. pub silo_names: Vec<Name>, + + /// The generation of active Nexus zones loaded. + pub active_nexus_generation: Generation, } impl fmt::Display for LoadSerializedConfigResult { @@ -187,6 +214,12 @@ impl fmt::Display for LoadSerializedConfigResult { join_comma_or_none(&self.silo_names) )?; + writeln!( + f, + "active Nexus generation: {}", + self.active_nexus_generation, + )?; + Ok(()) } } @@ -205,6 +238,9 @@ impl SimConfigBuilderInner { &mut self, external_dns_zone_names: Vec<String>, silo_names: Vec<Name>, + active_nexus_zones: &BTreeSet<OmicronZoneUuid>, + target_blueprint: Option<&BlueprintTarget>, + all_blueprints: &[Blueprint], res: &mut LoadSerializedResultBuilder, ) -> LoadSerializedConfigResult { let nnames = external_dns_zone_names.len(); @@ -226,7 +262,30 @@ impl SimConfigBuilderInner { self.set_silo_names_inner(silo_names.clone()); - LoadSerializedConfigResult { external_dns_zone_name, silo_names } + // Determine the active Nexus generation by comparing the current set of + // active Nexus zones to the current target blueprint. + let active_nexus_generation = match determine_active_nexus_generation( + active_nexus_zones, + target_blueprint, + all_blueprints, + ) { + Ok(generation) => generation, + Err(message) => { + res.warnings.push(format!( + "could not determine active Nexus \ + generation from serialized state: \ + {message} (using default of 1)" + )); + Generation::new() + } + }; + self.set_active_nexus_zone_generation(active_nexus_generation); + + LoadSerializedConfigResult { + external_dns_zone_name, + silo_names, + active_nexus_generation, + } } // Not public: the only caller of this is load_serialized. 
@@ -257,7 +316,45 @@ impl SimConfigBuilderInner { self.config.num_nexus = Some(num_nexus); } + fn set_active_nexus_zone_generation(&mut self, gen: Generation) { + self.config.active_nexus_zone_generation = gen; + } + fn wipe_inner(&mut self) { self.config = SimConfig::new(); } } + +fn determine_active_nexus_generation( + active_nexus_zones: &BTreeSet<OmicronZoneUuid>, + target_blueprint: Option<&BlueprintTarget>, + all_blueprints: &[Blueprint], +) -> Result<Generation, String> { + let Some(target_blueprint) = target_blueprint else { + return Err("no target blueprint set".to_string()); + }; + + let Some(blueprint) = + all_blueprints.iter().find(|bp| bp.id == target_blueprint.target_id) + else { + return Err(format!( + "target blueprint {} not found", + target_blueprint.target_id + )); + }; + + let maybe_gen = blueprint + .find_generation_for_nexus(active_nexus_zones) + .map_err(|err| format!("{err:#}"))?; + + maybe_gen.ok_or_else(|| { + format!( + "could not find Nexus zones in current target blueprint: {:?}", + active_nexus_zones + .iter() + .map(|z| z.to_string()) + .collect::<Vec<_>>() + .join(", ") + ) + }) +} diff --git a/nexus/reconfigurator/simulation/src/state.rs b/nexus/reconfigurator/simulation/src/state.rs index 1bdf010dfc9..737814bafd2 100644 --- a/nexus/reconfigurator/simulation/src/state.rs +++ b/nexus/reconfigurator/simulation/src/state.rs @@ -236,6 +236,9 @@ impl SimStateBuilder { let config = self.config.load_serialized( state.external_dns_zone_names.clone(), state.silo_names.clone(), + state.planning_input.active_nexus_zones(), + state.target_blueprint.as_ref(), + &state.blueprints, + &mut res, ); let system = diff --git a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index f28a62441e9..5fb304bb0bd 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -889,6 +889,12 @@ impl PlanningZoneUpdatesStepReport { .entry(sled_id) .and_modify(|zones| zones.push(zone_config.to_owned())) .or_insert_with(|| vec![zone_config.to_owned()]); + + // We check for out-of-date zones before expunging zones. If we just + // expunged this zone, it's no longer out of date. + if let Some(out_of_date) = self.out_of_date_zones.get_mut(&sled_id) { + out_of_date.retain(|z| z.zone_config.id != zone_config.id); + } } pub fn updated_zone( @@ -900,6 +906,12 @@ impl PlanningZoneUpdatesStepReport { .entry(sled_id) .and_modify(|zones| zones.push(zone_config.to_owned())) .or_insert_with(|| vec![zone_config.to_owned()]); + + // We check for out-of-date zones before updating zones. If we just + // updated this zone, it's no longer out of date. 
+ if let Some(out_of_date) = self.out_of_date_zones.get_mut(&sled_id) { + out_of_date.retain(|z| z.zone_config.id != zone_config.id); + } } pub fn unsafe_zone( From 83b88dac8beb4a4bcdb0bece1965bb3a006c9655 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 24 Sep 2025 10:35:56 -0700 Subject: [PATCH 09/18] [planner] Identify when zone update is blocked on inventory (#9063) Fixes https://github.com/oxidecomputer/omicron/issues/9047 --------- Co-authored-by: Alex Plotnick --- .../tests/output/cmds-mupdate-update-flow-stdout | 1 + .../tests/output/cmds-target-release-stdout | 3 +++ nexus/reconfigurator/planning/src/planner.rs | 8 +++++--- nexus/types/src/deployment/planning_report.rs | 4 ++++ openapi/nexus-internal.json | 15 +++++++++++++++ 5 files changed, 28 insertions(+), 3 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 1b7ce320057..5bd624087e4 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -2186,6 +2186,7 @@ planning report: * noop converting host phase 2 slot B to Artifact on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * noop converting host phase 2 slot B to Artifact on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 * only placed 0/1 desired nexus zones +* zone updates waiting on zone propagation to inventory * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index a3e75705373..3b811047cd2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -3237,6 +3237,7 @@ planning report: * waiting for NTP zones to appear in inventory on sleds: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * missing NTP zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +* zone updates waiting on zone propagation to inventory * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -5327,6 +5328,7 @@ planning report: * waiting for NTP zones to appear in inventory on sleds: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * missing NTP zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +* zone updates waiting on zone propagation to inventory * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated @@ -7092,6 +7094,7 @@ planning report: * waiting for NTP zones to appear in inventory on sleds: d81c6a84-79b8-4958-ae41-ea46c9b19763 * sleds getting NTP zones and which have other services already, making them eligible for discretionary zones: d81c6a84-79b8-4958-ae41-ea46c9b19763 * missing NTP zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 +* zone updates waiting on zone propagation to inventory * waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index dcad6a5366e..4e15b736782 100644 --- 
a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -1545,8 +1545,6 @@ impl<'a> Planner<'a> { &mut self, mgs_updates: &PlanningMgsUpdatesStepReport, ) -> Result { - let mut report = PlanningZoneUpdatesStepReport::new(); - let zones_currently_updating = self.get_zones_not_yet_propagated_to_inventory(); if !zones_currently_updating.is_empty() { @@ -1554,9 +1552,13 @@ impl<'a> Planner<'a> { self.log, "some zones not yet up-to-date"; "zones_currently_updating" => ?zones_currently_updating, ); - return Ok(report); + return Ok(PlanningZoneUpdatesStepReport::waiting_on( + ZoneUpdatesWaitingOn::InventoryPropagation, + )); } + let mut report = PlanningZoneUpdatesStepReport::new(); + // Find the zones with out-of-date images let out_of_date_zones = self.get_out_of_date_zones(); for (sled_id, zone, desired_image) in out_of_date_zones.iter() { diff --git a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index 5fb304bb0bd..d4c9ac4c33a 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -1023,6 +1023,9 @@ pub enum ZoneUpdatesWaitingOn { /// Waiting on discretionary zone placement. DiscretionaryZones, + /// Waiting on zones to propagate to inventory. + InventoryPropagation, + /// Waiting on updates to RoT / SP / Host OS / etc. PendingMgsUpdates, @@ -1034,6 +1037,7 @@ impl ZoneUpdatesWaitingOn { pub fn as_str(&self) -> &'static str { match self { Self::DiscretionaryZones => "discretionary zones", + Self::InventoryPropagation => "zone propagation to inventory", Self::PendingMgsUpdates => { "pending MGS updates (RoT / SP / Host OS / etc.)" } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 7b1d7ea36a9..a2320acb492 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -6013,6 +6013,21 @@ "type" ] }, + { + "description": "Waiting on zones to propagate to inventory.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "inventory_propagation" + ] + } + }, + "required": [ + "type" + ] + }, { "description": "Waiting on updates to RoT / SP / Host OS / etc.", "type": "object", From e295ba32d1a7f4dfd5264be9ac3d68c731f20482 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 24 Sep 2025 10:47:35 -0700 Subject: [PATCH 10/18] internal/external DNS should only contain records for active Nexus instances (#9060) --- Cargo.lock | 1 + dev-tools/reconfigurator-cli/src/lib.rs | 49 +++++ .../tests/input/cmds-target-release.txt | 3 + .../tests/output/cmds-nexus-generation-stdout | 65 +++--- .../tests/output/cmds-target-release-stdout | 184 ++++++---------- live-tests/Cargo.toml | 1 + live-tests/tests/test_nexus_handoff.rs | 199 ++++++++++++++++++ nexus/reconfigurator/execution/src/dns.rs | 25 ++- nexus/reconfigurator/execution/src/lib.rs | 8 + .../execution/src/test_utils.rs | 18 +- nexus/src/app/rack.rs | 1 + nexus/src/app/silo.rs | 9 +- nexus/types/src/deployment.rs | 23 ++ nexus/types/src/deployment/execution/dns.rs | 26 ++- nexus/types/src/deployment/execution/utils.rs | 23 +- 15 files changed, 445 insertions(+), 190 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0684b0c318..b1407514acf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7873,6 +7873,7 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", + "dns-service-client", "dropshot", "futures", "internal-dns-resolver", diff --git a/dev-tools/reconfigurator-cli/src/lib.rs 
b/dev-tools/reconfigurator-cli/src/lib.rs index 350ebbd6793..5d03257a923 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -2443,14 +2443,26 @@ fn cmd_blueprint_diff( // each blueprint. To do that we need to construct a list of sleds suitable // for the executor. let sleds_by_id = make_sleds_by_id(state.system().description())?; + + // It's tricky to figure out which active Nexus generation number to use + // when diff'ing blueprints. What's currently active might be wholly + // different from what's here. (Imagine generation 7 is active and these + // blueprints are from Nexus generation 4.) What's most likely useful is + // picking the Nexus generation of the blueprint itself. + let blueprint1_active_nexus_generation = + blueprint_active_nexus_generation(&blueprint1); + let blueprint2_active_nexus_generation = + blueprint_active_nexus_generation(&blueprint2); let internal_dns_config1 = blueprint_internal_dns_config( &blueprint1, &sleds_by_id, + blueprint1_active_nexus_generation, &Default::default(), )?; let internal_dns_config2 = blueprint_internal_dns_config( &blueprint2, &sleds_by_id, + blueprint2_active_nexus_generation, &Default::default(), )?; let dns_diff = DnsDiff::new(&internal_dns_config1, &internal_dns_config2) @@ -2462,11 +2474,13 @@ fn cmd_blueprint_diff( &blueprint1, state.config().silo_names(), external_dns_zone_name.to_owned(), + blueprint1_active_nexus_generation, ); let external_dns_config2 = blueprint_external_dns_config( &blueprint2, state.config().silo_names(), external_dns_zone_name.to_owned(), + blueprint2_active_nexus_generation, ); let dns_diff = DnsDiff::new(&external_dns_config1, &external_dns_config2) .context("failed to assemble external DNS diff")?; @@ -2524,12 +2538,15 @@ fn cmd_blueprint_diff_dns( } }; + let blueprint_active_generation = + blueprint_active_nexus_generation(&blueprint); let blueprint_dns_zone = match dns_group { CliDnsGroup::Internal => { let sleds_by_id = make_sleds_by_id(state.system().description())?; blueprint_internal_dns_config( blueprint, &sleds_by_id, + blueprint_active_generation, &Default::default(), )? } @@ -2537,6 +2554,7 @@ fn cmd_blueprint_diff_dns( blueprint, state.config().silo_names(), state.config().external_dns_zone_name().to_owned(), + blueprint_active_generation, ), }; @@ -3005,9 +3023,12 @@ fn cmd_load_example( // Generate the internal and external DNS configs based on the blueprint. let sleds_by_id = make_sleds_by_id(&example.system)?; + let blueprint_nexus_generation = + blueprint_active_nexus_generation(&blueprint); let internal_dns = blueprint_internal_dns_config( &blueprint, &sleds_by_id, + blueprint_nexus_generation, &Default::default(), )?; let external_dns_zone_name = @@ -3016,6 +3037,7 @@ fn cmd_load_example( &blueprint, state.config_mut().silo_names(), external_dns_zone_name, + blueprint_nexus_generation, ); let blueprint_id = blueprint.id; @@ -3082,3 +3104,30 @@ fn cmd_file_contents(args: FileContentsArgs) -> anyhow::Result> { Ok(Some(s)) } + +/// Returns the "active Nexus generation" to use for a historical blueprint +/// (i.e., a blueprint that may not have been generated or executed against the +/// current simulated state). This is used for `blueprint-diff`, for example, +/// which avoids assuming anything about the simulated state in comparing the +/// two blueprints. +/// +/// In general, the active Nexus generation for a blueprint is not well-defined. +/// We cannot know what the active Nexus generation was at some point in the +/// past. 
But we do know that it's one of these two values: +/// +/// - `blueprint.nexus_generation - 1`, if this blueprint was created as part +/// of an upgrade, starting with the point where the Nexus handoff was +/// initiated (inclusive) and ending with the first blueprint after the +/// handoff (exclusive). In most cases, this means that this is the single +/// blueprint during an upgrade that triggered the handoff. +/// - `blueprint.nexus_generation` otherwise (which includes all other +/// blueprints that are created during an upgrade and all blueprints created +/// outside of an upgrade). +/// +/// This implementation always returns `blueprint.nexus_generation`. In the +/// second case above, this is always correct. In the first case, this is +/// basically equivalent to assuming that the Nexus handoff had happened +/// instantaneously when the blueprint was created. +fn blueprint_active_nexus_generation(blueprint: &Blueprint) -> Generation { + blueprint.nexus_generation +} diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt index eff1ed4a5de..e001e584178 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-target-release.txt @@ -459,6 +459,9 @@ inventory-generate # Planning now should bump the top-level `nexus_generation` to 2, indicating # that we want handoff to begin. +# Note that `blueprint-diff` will show the DNS changes as though the handoff has +# happened already because that command does not take into account simulated +# state. blueprint-plan latest latest blueprint-diff latest diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout index b1f813834a2..f055e9896f3 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-nexus-generation-stdout @@ -117,38 +117,12 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS: -* DNS zone: "control-plane.oxide.internal": -+ name: 16a766ee-9400-4e67-9363-883670371a1b.host (records: 1) -+ AAAA fd00:1122:3344:101::28 -* name: _nexus-lockstep._tcp (records: 3 -> 4) -- SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -- SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -- SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -+ SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -+ SRV port 12232 16a766ee-9400-4e67-9363-883670371a1b.host.control-plane.oxide.internal -+ SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -+ SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -* name: _nexus._tcp (records: 3 -> 4) -- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -+ SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -+ SRV port 12221 16a766ee-9400-4e67-9363-883670371a1b.host.control-plane.oxide.internal -+ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -+ SRV port 12221 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal - unchanged names: 50 (records: 62) + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 52 (records: 68) external DNS: -* DNS zone: "oxide.example": -* name: example-silo.sys (records: 3 -> 4) -- A 192.0.2.2 -- A 192.0.2.3 -- A 192.0.2.4 -+ A 192.0.2.2 -+ A 192.0.2.3 -+ A 192.0.2.5 -+ A 192.0.2.4 - unchanged names: 4 (records: 6) + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) @@ -177,12 +151,35 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: - DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 53 (records: 71) +* DNS zone: "control-plane.oxide.internal": +- name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) +- AAAA fd00:1122:3344:101::22 ++ name: 16a766ee-9400-4e67-9363-883670371a1b.host (records: 1) ++ AAAA fd00:1122:3344:101::28 +- name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) +- AAAA fd00:1122:3344:103::22 +- name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) +- AAAA fd00:1122:3344:102::22 +* name: _nexus-lockstep._tcp (records: 3 -> 1) +- SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12232 16a766ee-9400-4e67-9363-883670371a1b.host.control-plane.oxide.internal +* name: _nexus._tcp (records: 3 -> 1) +- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 16a766ee-9400-4e67-9363-883670371a1b.host.control-plane.oxide.internal + unchanged names: 47 (records: 59) external DNS: - DNS zone: "oxide.example" (unchanged) - unchanged names: 5 (records: 10) +* DNS zone: "oxide.example": +* name: example-silo.sys (records: 3 -> 1) +- A 192.0.2.2 +- A 192.0.2.3 +- A 192.0.2.4 ++ A 192.0.2.5 + unchanged names: 4 (records: 6) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 3b811047cd2..b9b99c77fce 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -7896,48 +7896,12 @@ to: blueprint d69e1109-06be-4469-8876-4292dc7885d7 internal DNS: -* DNS zone: "control-plane.oxide.internal": -+ name: 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host (records: 1) -+ AAAA fd00:1122:3344:103::2b -+ name: 9ae90740-7fdb-4073-ae43-048f2fca3d69.host (records: 1) -+ AAAA fd00:1122:3344:102::2c -* name: _nexus-lockstep._tcp (records: 3 -> 6) -- SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -- SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -- SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -+ SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -+ SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -+ SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -+ SRV port 12232 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -+ SRV port 12232 
9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -+ SRV port 12232 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal -* name: _nexus._tcp (records: 3 -> 6) -- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -+ SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -+ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -+ SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -+ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -+ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -+ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal -+ name: d516d61b-fd96-46ad-a743-78eec814ee90.host (records: 1) -+ AAAA fd00:1122:3344:101::2b - unchanged names: 50 (records: 62) + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 52 (records: 68) external DNS: -* DNS zone: "oxide.example": -* name: example-silo.sys (records: 3 -> 6) -- A 192.0.2.2 -- A 192.0.2.3 -- A 192.0.2.4 -+ A 192.0.2.2 -+ A 192.0.2.7 -+ A 192.0.2.3 -+ A 192.0.2.6 -+ A 192.0.2.4 -+ A 192.0.2.5 - unchanged names: 4 (records: 6) + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) @@ -7958,6 +7922,9 @@ generated inventory collection 7349431d-718a-4353-a1ee-357ce2aeeb28 from configu > # Planning now should bump the top-level `nexus_generation` to 2, indicating > # that we want handoff to begin. +> # Note that `blueprint-diff` will show the DNS changes as though the handoff has +> # happened already because that command does not take into account simulated +> # state. 
> blueprint-plan latest latest INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 10, num_eligible: 0, num_ineligible: 0 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a @@ -8004,12 +7971,45 @@ to: blueprint 12f19448-6a10-4b4b-ae19-a8c94a566097 internal DNS: - DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 55 (records: 77) +* DNS zone: "control-plane.oxide.internal": +- name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) +- AAAA fd00:1122:3344:101::22 +- name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) +- AAAA fd00:1122:3344:103::22 +- name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) +- AAAA fd00:1122:3344:102::22 ++ name: 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host (records: 1) ++ AAAA fd00:1122:3344:103::2b ++ name: 9ae90740-7fdb-4073-ae43-048f2fca3d69.host (records: 1) ++ AAAA fd00:1122:3344:102::2c +* name: _nexus-lockstep._tcp (records: 3 -> 3) +- SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12232 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal ++ SRV port 12232 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal ++ SRV port 12232 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal +* name: _nexus._tcp (records: 3 -> 3) +- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal ++ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal ++ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal ++ name: d516d61b-fd96-46ad-a743-78eec814ee90.host (records: 1) ++ AAAA fd00:1122:3344:101::2b + unchanged names: 47 (records: 59) external DNS: - DNS zone: "oxide.example" (unchanged) - unchanged names: 5 (records: 12) +* DNS zone: "oxide.example": +* name: example-silo.sys (records: 3 -> 3) +- A 192.0.2.2 +- A 192.0.2.3 +- A 192.0.2.4 ++ A 192.0.2.7 ++ A 192.0.2.6 ++ A 192.0.2.5 + unchanged names: 4 (records: 6) @@ -8062,11 +8062,11 @@ to: blueprint 4713f6c4-e8ba-4a28-87a0-df75ebf7b8b6 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 54 (records: 71) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) - unchanged names: 5 (records: 12) + unchanged names: 5 (records: 9) @@ -8197,38 +8197,12 @@ to: blueprint 6d830d26-547e-492b-adfe-c5c4ad9c3751 internal DNS: -* DNS zone: "control-plane.oxide.internal": -- name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) -- AAAA fd00:1122:3344:102::22 -* name: _nexus._tcp (records: 6 -> 5) -- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -- SRV port 12221 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal -- SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -- SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -- SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal -+ SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -+ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -+ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -+ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -+ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal - unchanged names: 52 (records: 64) + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 52 (records: 68) external DNS: -* DNS zone: "oxide.example": -* name: example-silo.sys (records: 6 -> 5) -- A 192.0.2.2 -- A 192.0.2.7 -- A 192.0.2.3 -- A 192.0.2.6 -- A 192.0.2.4 -- A 192.0.2.5 -+ A 192.0.2.7 -+ A 192.0.2.3 -+ A 192.0.2.6 -+ A 192.0.2.4 -+ A 192.0.2.5 - unchanged names: 4 (records: 6) + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) @@ -8352,34 +8326,12 @@ to: blueprint 8e0cc787-e068-4a45-97ed-21029cbe4ddf internal DNS: -* DNS zone: "control-plane.oxide.internal": -- name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) -- AAAA fd00:1122:3344:101::22 -* name: _nexus._tcp (records: 5 -> 4) -- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal -- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -- SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -- SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -- SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal -+ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -+ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -+ SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -+ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal - unchanged names: 51 (records: 63) + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 52 (records: 68) external DNS: -* DNS zone: "oxide.example": -* name: example-silo.sys (records: 5 -> 4) -- A 192.0.2.7 -- A 192.0.2.3 -- A 192.0.2.6 -- A 192.0.2.4 -- A 192.0.2.5 -+ A 192.0.2.7 -+ A 192.0.2.6 -+ A 192.0.2.4 -+ A 192.0.2.5 - unchanged names: 4 (records: 6) + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) @@ -8500,30 +8452,12 @@ to: blueprint e31c9054-8549-4c68-acf9-a01f68d1fc9b internal DNS: -* DNS zone: "control-plane.oxide.internal": -- name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) -- AAAA fd00:1122:3344:103::22 -* name: _nexus._tcp (records: 4 -> 3) -- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal -- SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -- SRV port 12221 9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -- SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal -+ SRV port 12221 90dbd6f3-9bcb-4a62-ad85-d61f5b3a36ad.host.control-plane.oxide.internal -+ SRV port 12221 
9ae90740-7fdb-4073-ae43-048f2fca3d69.host.control-plane.oxide.internal -+ SRV port 12221 d516d61b-fd96-46ad-a743-78eec814ee90.host.control-plane.oxide.internal - unchanged names: 50 (records: 62) + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 52 (records: 68) external DNS: -* DNS zone: "oxide.example": -* name: example-silo.sys (records: 4 -> 3) -- A 192.0.2.7 -- A 192.0.2.6 -- A 192.0.2.4 -- A 192.0.2.5 -+ A 192.0.2.7 -+ A 192.0.2.6 -+ A 192.0.2.5 - unchanged names: 4 (records: 6) + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) @@ -8804,7 +8738,7 @@ to: blueprint 3e332949-6785-4aff-ad86-6134f5ce6152 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) diff --git a/live-tests/Cargo.toml b/live-tests/Cargo.toml index 41c896bd0a9..184e0c6af2b 100644 --- a/live-tests/Cargo.toml +++ b/live-tests/Cargo.toml @@ -15,6 +15,7 @@ omicron-workspace-hack.workspace = true [dev-dependencies] anyhow.workspace = true assert_matches.workspace = true +dns-service-client.workspace = true dropshot.workspace = true futures.workspace = true internal-dns-resolver.workspace = true diff --git a/live-tests/tests/test_nexus_handoff.rs b/live-tests/tests/test_nexus_handoff.rs index 4d87a6d4a28..7c819b47769 100644 --- a/live-tests/tests/test_nexus_handoff.rs +++ b/live-tests/tests/test_nexus_handoff.rs @@ -9,24 +9,32 @@ use crate::common::reconfigurator::blueprint_wait_sled_configs_propagated; use anyhow::Context; use common::LiveTestContext; use common::reconfigurator::blueprint_edit_current_target; +use internal_dns_types::config::DnsRecord; +use internal_dns_types::names::ServiceName; use live_tests_macros::live_test; use nexus_db_model::DbMetadataNexusState; use nexus_lockstep_client::types::QuiesceState; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_preparation::PlanningInputFromDb; +use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneImageSource; use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::PlannerConfig; use nexus_types::deployment::blueprint_zone_type; +use omicron_common::api::external::Generation; use omicron_test_utils::dev::poll::CondCheckError; use omicron_test_utils::dev::poll::wait_for_condition; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::SledUuid; use slog::debug; use slog::info; +use slog::o; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::net::IpAddr; +use std::net::SocketAddr; use std::time::Duration; #[live_test] @@ -307,6 +315,33 @@ async fn test_nexus_handoff(lc: &LiveTestContext) { } info!(log, "new Nexus instances are still not reachable (good)"); + // This is the last point before triggering the handoff. Check the contents + // of both internal and external DNS for Nexus. We should only see the + // original Nexus zones. It's a little hard to be sure this isn't working + // by accident: it's conceivable that the DNS behavior is wrong, but that we + // just haven't updated the DNS servers. Since we expect no change to the + // DNS servers, there's nothing for us to wait for to be sure nothing + // changed. 
The only way to be sure would be to dig into the blueprint + // execution state to confirm that we successfully propagated DNS for the + // blueprint that we executed. That seems more trouble than it's worth + // here. We do know that we got through at least some of this blueprint's + // execution, based on having seen the sled configurations get propagated to + // sled agent. + check_internal_dns( + log, + &blueprint_initial, + blueprint_initial.nexus_generation, + ) + .await + .expect("internal DNS (before)"); + check_external_dns( + log, + &blueprint_initial, + blueprint_initial.nexus_generation, + ) + .await + .expect("external DNS (before)"); + // Complete the demo saga to unblock quiescing. demo_nexus .saga_demo_complete(&demo_saga.demo_saga_id) @@ -423,4 +458,168 @@ async fn test_nexus_handoff(lc: &LiveTestContext) { ) .await .expect("waiting for cleanup sled configs"); + + // Verify that DNS has been updated to reflect the handoff. This time, + // since we expect a change, we can wait for it to happen. + wait_for_condition( + || async { + check_internal_dns(log, &blueprint_handoff, next_generation) + .await?; + check_external_dns(log, &blueprint_handoff, next_generation) + .await?; + Ok::<_, CondCheckError>(()) + }, + &Duration::from_secs(1), + &Duration::from_secs(30), + ) + .await + .expect("waiting for post-handoff DNS update"); +} + +/// Checks that current internal DNS reflects that the Nexus instances +/// in-service right now are the ones having generation `active_generation`. +/// +/// Returns `CondCheckError` on failure for use in `wait_for_condition`. +async fn check_internal_dns( + log: &slog::Logger, + blueprint: &Blueprint, + active_generation: Generation, +) -> Result<(), CondCheckError> { + // Compute what we expect to find, based on which Nexus instances in the + // blueprint have the specified generation. + let expected_nexus_addrs = blueprint + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_sled_id, _zone_cfg, nexus_config)| { + (nexus_config.nexus_generation == active_generation) + .then_some(nexus_config.internal_address) + }) + .collect::>(); + + // Find the DNS server based on what's currently in the blueprint. + let dns_sockaddr = blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .find_map(|(_sled_id, zone_cfg)| { + if let BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { dns_address, .. }, + ) = &zone_cfg.zone_type + { + Some(dns_address) + } else { + None + } + }) + .expect("at least one internal DNS server"); + + // Make a resolver using this DNS server. + let resolver_log = log.new(o!("component" => "VerifyInternalDnsResolver")); + let resolver = internal_dns_resolver::Resolver::new_from_addrs( + resolver_log, + &[SocketAddr::from(*dns_sockaddr)], + ) + .context("creating resolver") + .map_err(CondCheckError::Failed)?; + + // Finally, look up Nexus in DNS and compare it to what we expected. + let found_nexus_addrs = resolver + .lookup_all_socket_v6(ServiceName::Nexus) + .await + .map_err(|_| CondCheckError::NotYet)? + .into_iter() + .collect::>(); + debug!( + log, + "check_internal_dns"; + "expected" => ?expected_nexus_addrs, + "found" => ?found_nexus_addrs, + ); + + if expected_nexus_addrs == found_nexus_addrs { + Ok(()) + } else { + Err(CondCheckError::NotYet) + } +} + +/// Checks that current external DNS reflects that the Nexus instances +/// in-service right now are the ones having generation `active_generation`. 
+/// +/// Returns `CondCheckError` on failure for use in `wait_for_condition`. +async fn check_external_dns( + log: &slog::Logger, + blueprint: &Blueprint, + active_generation: Generation, +) -> Result<(), CondCheckError> { + // Compute which Nexus instances we expect to find in external DNS based on + // what's in-service in the blueprint. + let expected_nexus_addrs = blueprint + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_sled_id, _zone_cfg, nexus_config)| { + (nexus_config.nexus_generation == active_generation) + .then_some(nexus_config.external_ip.ip) + }) + .collect::>(); + + // Find the DNS server based on what's currently in the blueprint. + let dns_http_sockaddr = blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .find_map(|(_sled_id, zone_cfg)| { + if let BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { http_address, .. }, + ) = &zone_cfg.zone_type + { + Some(http_address) + } else { + None + } + }) + .expect("at least one external DNS server"); + + // Unfortunately for us, the external DNS servers are not necessarily + // reachable from where we are. So we can't directly look up names in DNS. + // Instead, use the HTTP (config) interface. + let url = format!("http://{}", dns_http_sockaddr); + let client = dns_service_client::Client::new(&url, log.clone()); + let config = client + .dns_config_get() + .await + .map_err(|_| CondCheckError::NotYet)? + .into_inner(); + + let found_nexus_addrs = config + .zones + .into_iter() + .next() + .expect("at least one external DNS zone") + .records + .into_iter() + .find_map(|(name, dns_records)| { + if !name.ends_with(".sys") { + return None; + } + + Some( + dns_records + .into_iter() + .filter_map(|record| match record { + DnsRecord::A(addr) => Some(IpAddr::from(addr)), + DnsRecord::Aaaa(addr) => Some(IpAddr::from(addr)), + _ => None, + }) + .collect::>(), + ) + }) + .expect("at least one silo"); + + debug!( + log, + "check_external_dns"; + "expected" => ?expected_nexus_addrs, + "found" => ?found_nexus_addrs, + ); + + if expected_nexus_addrs == found_nexus_addrs { + Ok(()) + } else { + Err(CondCheckError::NotYet) + } } diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 9fe8119937c..69bb198ff3d 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -22,6 +22,7 @@ use nexus_types::internal_api::params::DnsConfigZone; use omicron_common::api::external::Error; use omicron_common::api::external::InternalContext; use omicron_common::bail_unless; +use omicron_uuid_kinds::OmicronZoneUuid; use slog::{debug, info, o}; pub(crate) async fn deploy_dns( @@ -31,6 +32,7 @@ pub(crate) async fn deploy_dns( blueprint: &Blueprint, sleds_by_id: &IdOrdMap, overrides: &Overridables, + nexus_id: OmicronZoneUuid, ) -> Result<(), Error> { // First, fetch the current DNS configs. let internal_dns_config_current = datastore @@ -42,6 +44,11 @@ pub(crate) async fn deploy_dns( .await .internal_context("reading current DNS (external)")?; + // Determine the currently-active Nexus generation based on the fact that + // the Nexus executing this blueprint must itself be active. + let active_nexus_generation = + blueprint.find_generation_for_self(nexus_id)?; + // We could check here that the DNS version we found isn't newer than when // the blueprint was generated. But we have to check later when we try to // update the database anyway. 
And we're not wasting much effort allowing @@ -49,11 +56,13 @@ pub(crate) async fn deploy_dns( // we know it's being hit when we exercise this condition. // Next, construct the DNS config represented by the blueprint. - let internal_dns_zone_blueprint = - blueprint_internal_dns_config(blueprint, sleds_by_id, overrides) - .map_err(|e| Error::InternalError { - internal_message: e.to_string(), - })?; + let internal_dns_zone_blueprint = blueprint_internal_dns_config( + blueprint, + sleds_by_id, + active_nexus_generation, + overrides, + ) + .map_err(|e| Error::InternalError { internal_message: e.to_string() })?; let silos = datastore .silo_list_all_batched(opctx, Discoverability::All) .await @@ -83,6 +92,7 @@ pub(crate) async fn deploy_dns( blueprint, &silos, external_dns_zone_name, + active_nexus_generation, ); // Deploy the changes. @@ -649,6 +659,7 @@ mod test { let blueprint_dns = blueprint_internal_dns_config( &blueprint, &IdOrdMap::new(), + blueprint.nexus_generation, &Default::default(), ) .unwrap(); @@ -786,6 +797,7 @@ mod test { let mut blueprint_dns_zone = blueprint_internal_dns_config( &blueprint, &sleds_by_id, + blueprint.nexus_generation, &Default::default(), ) .unwrap(); @@ -1082,6 +1094,7 @@ mod test { &blueprint, &[], String::from("oxide.test"), + blueprint.nexus_generation, ); assert_eq!(external_dns_zone.zone_name, "oxide.test"); // We'll only have external DNS nameserver records - the A/AAAA records @@ -1129,6 +1142,7 @@ mod test { &blueprint, std::slice::from_ref(my_silo.name()), String::from("oxide.test"), + blueprint.nexus_generation, ); assert_eq!(external_dns_zone.zone_name, String::from("oxide.test")); let records = &external_dns_zone.records; @@ -1195,6 +1209,7 @@ mod test { &blueprint, std::slice::from_ref(my_silo.name()), String::from("oxide.test"), + blueprint.nexus_generation, ); let silo_records = &external_dns_zone .records diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index 413c651401e..bbe585cde57 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -228,6 +228,7 @@ pub async fn realize_blueprint( creator, overrides.unwrap_or(&*overridables::DEFAULT), sled_list.clone(), + nexus_id, ); register_cleanup_expunged_zones_step( @@ -492,6 +493,7 @@ fn register_plumb_firewall_rules_step<'a>( .register(); } +#[allow(clippy::too_many_arguments)] fn register_dns_records_step<'a>( registrar: &ComponentRegistrar<'_, 'a>, opctx: &'a OpContext, @@ -500,12 +502,17 @@ fn register_dns_records_step<'a>( creator: OmicronZoneUuid, overrides: &'a Overridables, sleds: SharedStepHandle>>, + nexus_id: Option, ) { registrar .new_step( ExecutionStepId::Ensure, "Deploy DNS records", async move |cx| { + let Some(nexus_id) = nexus_id else { + return StepSkipped::new((), "not running as Nexus").into(); + }; + let sleds_by_id = sleds.into_value(cx.token()).await; let res = dns::deploy_dns( @@ -515,6 +522,7 @@ fn register_dns_records_step<'a>( blueprint, &sleds_by_id, overrides, + nexus_id, ) .await .map_err(|e| anyhow!("{}", InlineErrorChain::new(&e))); diff --git a/nexus/reconfigurator/execution/src/test_utils.rs b/nexus/reconfigurator/execution/src/test_utils.rs index 46525d51527..0aad3330fe9 100644 --- a/nexus/reconfigurator/execution/src/test_utils.rs +++ b/nexus/reconfigurator/execution/src/test_utils.rs @@ -10,12 +10,11 @@ use internal_dns_resolver::Resolver; use nexus_db_queries::{context::OpContext, db::DataStore}; use nexus_types::{ deployment::{ - Blueprint, PendingMgsUpdates, + 
Blueprint, BlueprintZoneDisposition, PendingMgsUpdates, execution::{EventBuffer, Overridables}, }, quiesce::SagaQuiesceHandle, }; -use omicron_uuid_kinds::OmicronZoneUuid; use update_engine::TerminalKind; use crate::{RealizeBlueprintOutput, RequiredRealizeArgs}; @@ -42,7 +41,18 @@ pub(crate) async fn realize_blueprint_and_expect( let (mgs_updates, _rx) = watch::channel(PendingMgsUpdates::new()); // This helper function does not mess with quiescing. let saga_quiesce = SagaQuiesceHandle::new(opctx.log.clone()); - let nexus_id = OmicronZoneUuid::new_v4(); + // Act on behalf of one of the Nexus instances that could be currently + // active in the blueprint. + let nexus_id = blueprint + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .find_map(|(_sled_id, zone_config, nexus_config)| { + (nexus_config.nexus_generation == blueprint.nexus_generation) + .then_some(zone_config.id) + }) + .expect( + "no Nexus found in blueprint that matches the blueprint's \ + Nexus generation", + ); let output = crate::realize_blueprint( RequiredRealizeArgs { opctx, @@ -55,7 +65,7 @@ pub(crate) async fn realize_blueprint_and_expect( saga_quiesce, } .with_overrides(overrides) - .as_nexus(OmicronZoneUuid::new_v4()), + .as_nexus(nexus_id), ) .await // We expect here rather than in the caller because we want to assert that diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 471dbf585c8..388b65b3a32 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -222,6 +222,7 @@ impl super::Nexus { &request.blueprint, vec![silo_name], request.external_dns_zone_name, + request.blueprint.nexus_generation, ); for (name, records) in external_dns_config.records.into_iter() { dns_update.add_name(name, records)?; diff --git a/nexus/src/app/silo.rs b/nexus/src/app/silo.rs index 343703d9319..6d0cf94ea32 100644 --- a/nexus/src/app/silo.rs +++ b/nexus/src/app/silo.rs @@ -100,6 +100,7 @@ impl super::Nexus { // this (very specific) context. let nexus_opctx = self.opctx_external_authn(); let datastore = self.datastore(); + let nexus_id = self.id(); // Set up an external DNS name for this Silo's API and console // endpoints (which are the same endpoint). 
@@ -111,8 +112,12 @@ impl super::Nexus { .blueprint_target_get_current_full(opctx) .await .internal_context("loading target blueprint")?; - let nexus_external_ips = - blueprint_nexus_external_ips(&target_blueprint); + let active_nexus_generation = + target_blueprint.find_generation_for_self(nexus_id)?; + let nexus_external_ips = blueprint_nexus_external_ips( + &target_blueprint, + active_nexus_generation, + ); let dns_records: Vec = nexus_external_ips .into_iter() .map(|addr| match addr { diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 67ea81dcc03..e9f86155834 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -94,6 +94,7 @@ pub use network_resources::OmicronZoneExternalSnatIp; pub use network_resources::OmicronZoneNetworkResources; pub use network_resources::OmicronZoneNic; pub use network_resources::OmicronZoneNicEntry; +use omicron_common::api::external::Error; pub use planning_input::ClickhouseMode; pub use planning_input::ClickhousePolicy; pub use planning_input::CockroachDbClusterVersion; @@ -458,6 +459,28 @@ impl Blueprint { Ok(gen) } + + /// Returns the Nexus generation number for Nexus `nexus_id`, which is + /// assumed to refer to the currently-running Nexus instance (the current + /// process) + pub fn find_generation_for_self( + &self, + nexus_id: OmicronZoneUuid, + ) -> Result { + for (_sled_id, zone_config, nexus_config) in + self.all_nexus_zones(BlueprintZoneDisposition::is_in_service) + { + if zone_config.id == nexus_id { + return Ok(nexus_config.nexus_generation); + } + } + + Err(Error::internal_error(&format!( + "failed to determine generation of currently-running Nexus: \ + did not find Nexus {} in blueprint {}", + nexus_id, self.id, + ))) + } } /// Description of the source of a blueprint. diff --git a/nexus/types/src/deployment/execution/dns.rs b/nexus/types/src/deployment/execution/dns.rs index 04a28aac921..fb9906df5ba 100644 --- a/nexus/types/src/deployment/execution/dns.rs +++ b/nexus/types/src/deployment/execution/dns.rs @@ -9,7 +9,7 @@ use internal_dns_types::{ config::DnsConfigBuilder, names::{ServiceName, ZONE_APEX_NAME}, }; -use omicron_common::api::external::Name; +use omicron_common::api::external::{Generation, Name}; use crate::{ deployment::{ @@ -29,6 +29,7 @@ use super::{ pub fn blueprint_internal_dns_config( blueprint: &Blueprint, sleds_by_id: &IdOrdMap, + active_nexus_generation: Generation, overrides: &Overridables, ) -> anyhow::Result { // The DNS names configured here should match what RSS configures for the @@ -97,17 +98,20 @@ pub fn blueprint_internal_dns_config( ) => (ServiceName::Cockroach, address), BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { internal_address, + nexus_generation, lockstep_port, .. }) => { - // Add both the `nexus` service as well as the `nexus-lockstep` - // service. Continue so we don't fall through and call - // `host_zone_with_one_backend`. - dns_builder.host_zone_nexus( - zone.id, - *internal_address, - *lockstep_port, - )?; + if *nexus_generation == active_nexus_generation { + // Add both the `nexus` service as well as the + // `nexus-lockstep` service. Continue so we don't fall + // through and call `host_zone_with_one_backend`. 
+ dns_builder.host_zone_nexus( + zone.id, + *internal_address, + *lockstep_port, + )?; + } continue 'all_zones; } BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { @@ -188,8 +192,10 @@ pub fn blueprint_external_dns_config<'a>( blueprint: &Blueprint, silos: impl IntoIterator, external_dns_zone_name: String, + active_nexus_generation: Generation, ) -> DnsConfigZone { - let nexus_external_ips = blueprint_nexus_external_ips(blueprint); + let nexus_external_ips = + blueprint_nexus_external_ips(blueprint, active_nexus_generation); let mut dns_external_ips = blueprint_external_dns_nameserver_ips(blueprint); let nexus_dns_records: Vec = nexus_external_ips diff --git a/nexus/types/src/deployment/execution/utils.rs b/nexus/types/src/deployment/execution/utils.rs index d9ebb853da8..f9e21bc608d 100644 --- a/nexus/types/src/deployment/execution/utils.rs +++ b/nexus/types/src/deployment/execution/utils.rs @@ -6,7 +6,10 @@ use std::net::{IpAddr, SocketAddrV6}; use iddqd::{IdOrdItem, id_upcast}; use nexus_sled_agent_shared::inventory::SledRole; -use omicron_common::address::{Ipv6Subnet, SLED_PREFIX}; +use omicron_common::{ + address::{Ipv6Subnet, SLED_PREFIX}, + api::external::Generation, +}; use omicron_uuid_kinds::SledUuid; use crate::{ @@ -81,16 +84,16 @@ impl IdOrdItem for Sled { id_upcast!(); } -/// Return the Nexus external addresses according to the given blueprint -pub fn blueprint_nexus_external_ips(blueprint: &Blueprint) -> Vec { +/// Return the active Nexus external addresses according to the given blueprint +pub fn blueprint_nexus_external_ips( + blueprint: &Blueprint, + active_generation: Generation, +) -> Vec { blueprint - .all_omicron_zones(BlueprintZoneDisposition::is_in_service) - .filter_map(|(_, z)| match z.zone_type { - BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { - external_ip, - .. - }) => Some(external_ip.ip), - _ => None, + .all_nexus_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_sled_id, _zone_config, nexus_config)| { + (nexus_config.nexus_generation == active_generation) + .then_some(nexus_config.external_ip.ip) }) .collect() } From eafd242a7c72f8e79501190ffee213e2a0112bd4 Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Wed, 24 Sep 2025 13:48:50 -0400 Subject: [PATCH 11/18] TQ: Add protocol support for LRTQ upgrade (#9065) This PR completes the first version of the sans-io trust quorum protocol implementation. LRTQ upgrade can now be started via `Node::coordinate_upgrade_from_lrtq`. This triggers the coordinating node to start collecting the LRTQ key shares so that they can be used to construct the LRTQ rack secret via the bootstore code. After this occurs, a Prepare message is sent out with this old rack secret encrypted in a manner identical to a normal reconfiguration. The prepare and commit paths remain the same. The cluster proptest was updated to sometimes start out with an existing LRTQ configuration and then to upgrade from there. Like normal reconfigurations it allows aborting and pre-empting of the LRTQ upgrade with a new attempt at a higher epoch. In production this is how we "retry" if the coordinating node crashes prior to commit, or more accurately, if nexus can't talk to the coordinating node for some period of time and just moves on. After the LRTQ upgrade commits, normal reconfigurations are run. We also remove unnecessary config related messages in this commit. Since a `Configuration` does not contain sensitive information it can be retrieved when Nexus polls the coordinator before it commits. 
Then Nexus can save this info and send it in `PrepareAndCommit` messages rather than having the receiving node try to find a live peer with the config prior to collecting shares. This is a nice optimization that reduces protocol complexity a bit. This removal allowed removing the TODO in the message `match` statement in `Node::handle` and completing the protocol. --- Cargo.lock | 3 + bootstore/src/lib.rs | 6 + bootstore/src/trust_quorum/rack_secret.rs | 1 - trust-quorum/src/configuration.rs | 29 +- trust-quorum/src/coordinator_state.rs | 451 +++++++++++++----- trust-quorum/src/crypto.rs | 10 + trust-quorum/src/lib.rs | 7 +- trust-quorum/src/messages.rs | 29 +- trust-quorum/src/node.rs | 154 +++++- trust-quorum/src/node_ctx.rs | 14 + trust-quorum/src/persistent_state.rs | 10 + trust-quorum/src/validators.rs | 269 ++++++++++- trust-quorum/test-utils/Cargo.toml | 3 + trust-quorum/test-utils/src/event.rs | 9 + trust-quorum/test-utils/src/nexus.rs | 57 ++- trust-quorum/test-utils/src/state.rs | 217 ++++++++- .../tests/cluster.proptest-regressions | 1 + trust-quorum/tests/cluster.rs | 89 +++- 18 files changed, 1166 insertions(+), 193 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b1407514acf..1bcbae108e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14452,6 +14452,7 @@ dependencies = [ name = "trust-quorum-test-utils" version = "0.1.0" dependencies = [ + "bootstore", "camino", "daft", "dropshot", @@ -14459,8 +14460,10 @@ dependencies = [ "iddqd", "omicron-uuid-kinds", "omicron-workspace-hack", + "secrecy 0.10.3", "serde", "serde_json", + "sled-hardware-types", "slog", "trust-quorum", ] diff --git a/bootstore/src/lib.rs b/bootstore/src/lib.rs index e5d989a073a..5a8fcf142a0 100644 --- a/bootstore/src/lib.rs +++ b/bootstore/src/lib.rs @@ -11,3 +11,9 @@ use serde::{Deserialize, Serialize}; Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, )] pub struct Sha3_256Digest([u8; 32]); + +impl Sha3_256Digest { + pub fn new(bytes: [u8; 32]) -> Self { + Sha3_256Digest(bytes) + } +} diff --git a/bootstore/src/trust_quorum/rack_secret.rs b/bootstore/src/trust_quorum/rack_secret.rs index 3b7b4f38962..8f30da55a76 100644 --- a/bootstore/src/trust_quorum/rack_secret.rs +++ b/bootstore/src/trust_quorum/rack_secret.rs @@ -90,7 +90,6 @@ impl RackSecret { } /// Combine a set of shares and return a RackSecret - #[allow(unused)] pub fn combine_shares( shares: &[Vec], ) -> Result { diff --git a/trust-quorum/src/configuration.rs b/trust-quorum/src/configuration.rs index 2418f8f14f0..7e3568b5ad9 100644 --- a/trust-quorum/src/configuration.rs +++ b/trust-quorum/src/configuration.rs @@ -5,7 +5,6 @@ //! 
A configuration of a trust quroum at a given epoch use crate::crypto::{EncryptedRackSecrets, RackSecret, Sha3_256Digest}; -use crate::validators::ValidatedReconfigureMsg; use crate::{Epoch, PlatformId, Threshold}; use daft::Diffable; use gfss::shamir::{Share, SplitError}; @@ -15,7 +14,7 @@ use secrecy::ExposeSecret; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use slog_error_chain::SlogInlineError; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; #[derive(Debug, Clone, thiserror::Error, PartialEq, Eq, SlogInlineError)] pub enum ConfigurationError { @@ -75,6 +74,14 @@ impl IdOrdItem for Configuration { id_upcast!(); } +pub struct NewConfigParams<'a> { + pub rack_id: RackUuid, + pub epoch: Epoch, + pub members: &'a BTreeSet, + pub threshold: Threshold, + pub coordinator_id: &'a PlatformId, +} + impl Configuration { /// Create a new configuration for the trust quorum /// @@ -82,15 +89,15 @@ impl Configuration { /// coordinator will fill this in as necessary after retrieving shares for /// the last committed epoch. pub fn new( - reconfigure_msg: &ValidatedReconfigureMsg, + params: NewConfigParams<'_>, ) -> Result<(Configuration, BTreeMap), ConfigurationError> { - let coordinator = reconfigure_msg.coordinator_id().clone(); + let coordinator = params.coordinator_id.clone(); let rack_secret = RackSecret::new(); let shares = rack_secret.split( - reconfigure_msg.threshold(), - reconfigure_msg - .members() + params.threshold, + params + .members .len() .try_into() .map_err(|_| ConfigurationError::TooManyMembers)?, @@ -106,7 +113,7 @@ impl Configuration { let mut members: BTreeMap = BTreeMap::new(); let mut shares: BTreeMap = BTreeMap::new(); for (platform_id, (share, digest)) in - reconfigure_msg.members().iter().cloned().zip(shares_and_digests) + params.members.iter().cloned().zip(shares_and_digests) { members.insert(platform_id.clone(), digest); shares.insert(platform_id, share); @@ -114,11 +121,11 @@ impl Configuration { Ok(( Configuration { - rack_id: reconfigure_msg.rack_id(), - epoch: reconfigure_msg.epoch(), + rack_id: params.rack_id, + epoch: params.epoch, coordinator, members, - threshold: reconfigure_msg.threshold(), + threshold: params.threshold, encrypted_rack_secrets: None, }, shares, diff --git a/trust-quorum/src/coordinator_state.rs b/trust-quorum/src/coordinator_state.rs index e50cfc577ac..1a13240084a 100644 --- a/trust-quorum/src/coordinator_state.rs +++ b/trust-quorum/src/coordinator_state.rs @@ -4,17 +4,36 @@ //! State of a reconfiguration coordinator inside a [`crate::Node`] -use crate::configuration::ConfigurationDiff; -use crate::crypto::{LrtqShare, PlaintextRackSecrets, ShareDigestLrtq}; -use crate::validators::{ReconfigurationError, ValidatedReconfigureMsg}; +use crate::NodeHandlerCtx; +use crate::configuration::{ConfigurationDiff, ConfigurationError}; +use crate::crypto::{LrtqShare, PlaintextRackSecrets, ReconstructedRackSecret}; +use crate::validators::{ + ReconfigurationError, ValidatedLrtqUpgradeMsg, ValidatedReconfigureMsg, +}; use crate::{Configuration, Epoch, PeerMsgKind, PlatformId, RackSecret}; -use crate::{NodeHandlerCtx, ValidatedReconfigureMsgDiff}; +use bootstore::trust_quorum::RackSecret as LrtqRackSecret; use daft::{Diffable, Leaf}; use gfss::shamir::Share; use slog::{Logger, error, info, o, warn}; use std::collections::{BTreeMap, BTreeSet}; use std::mem; +// A coordinator can be upgrading from LRTQ or reconfiguring a TQ config. 
+#[derive(Clone, Debug, PartialEq, Eq, Diffable)] +pub enum CoordinatingMsg { + Upgrade(ValidatedLrtqUpgradeMsg), + Reconfig(ValidatedReconfigureMsg), +} + +impl CoordinatingMsg { + pub fn epoch(&self) -> Epoch { + match self { + Self::Upgrade(msg) => msg.epoch(), + Self::Reconfig(msg) => msg.epoch(), + } + } +} + /// The state of a reconfiguration coordinator. /// /// A coordinator can be any trust quorum node that is a member of both the old @@ -23,17 +42,15 @@ use std::mem; /// when the control plane is up, as we use Nexus to persist prepares and ensure /// commitment happens, even if the system crashes while committing. If a /// rack crash (such as a power outage) occurs before nexus is informed of the -/// prepares, nexus will skip the epoch and start a new reconfiguration. This +/// prepare acks, nexus will skip the epoch and start a new reconfiguration. This /// allows progress to always be made with a full linearization of epochs. -/// -/// We allow some unused fields before we complete the coordination code #[derive(Clone, Debug, Diffable)] pub struct CoordinatorState { #[daft(ignore)] log: Logger, - /// A copy of the message used to start this reconfiguration - reconfigure_msg: ValidatedReconfigureMsg, + /// A copy of the message used to start this coordination + msg: CoordinatingMsg, /// Configuration that will get persisted inside a `Prepare` message in a /// `Node`s `PersistentState`, once it is possible to create the Prepare. @@ -46,8 +63,8 @@ pub struct CoordinatorState { // For diffs we want to allow access to all fields, but not make them public in // the `CoordinatorState` type itself. impl<'daft> CoordinatorStateDiff<'daft> { - pub fn reconfigure_msg(&self) -> &ValidatedReconfigureMsgDiff<'daft> { - &self.reconfigure_msg + pub fn msg(&self) -> Leaf<&CoordinatingMsg> { + self.msg } pub fn configuration(&self) -> &ConfigurationDiff<'daft> { @@ -62,7 +79,7 @@ impl<'daft> CoordinatorStateDiff<'daft> { #[cfg(feature = "danger_partial_eq_ct_wrapper")] impl PartialEq for CoordinatorState { fn eq(&self, other: &Self) -> bool { - self.reconfigure_msg == other.reconfigure_msg + self.msg == other.msg && self.configuration == other.configuration && self.op == other.op } @@ -77,12 +94,13 @@ impl CoordinatorState { /// Return the newly constructed `CoordinatorState` along with this node's /// `PrepareMsg` so that it can be persisted. 
pub fn new_uninitialized( - log: Logger, + log: &Logger, msg: ValidatedReconfigureMsg, ) -> Result<(CoordinatorState, Configuration, Share), ReconfigurationError> { + let log = log.new(o!("component" => "tq-coordinator-state")); // Create a configuration for this epoch - let (config, shares) = Configuration::new(&msg)?; + let (config, shares) = Configuration::new((&msg).into())?; let mut prepares = BTreeMap::new(); // `my_share` is optional only so that we can fill it in via the @@ -121,12 +139,13 @@ impl CoordinatorState { /// A reconfiguration from one group to another pub fn new_reconfiguration( - log: Logger, + log: &Logger, msg: ValidatedReconfigureMsg, latest_committed_config: &Configuration, our_latest_committed_share: Share, ) -> Result { - let (config, new_shares) = Configuration::new(&msg)?; + let log = log.new(o!("component" => "tq-coordinator-state")); + let (config, new_shares) = Configuration::new((&msg).into())?; info!( log, @@ -151,6 +170,45 @@ impl CoordinatorState { Ok(CoordinatorState::new(log, msg, config, op)) } + pub fn new_upgrade_from_lrtq( + log: &Logger, + ctx: &mut impl NodeHandlerCtx, + msg: ValidatedLrtqUpgradeMsg, + ) -> Result { + let log = log.new(o!("component" => "tq-coordinator-state")); + let (configuration, new_shares) = Configuration::new((&msg).into())?; + + info!( + log, + "Starting coordination for LRTQ upgrade on existing node"; + "epoch" => %configuration.epoch + ); + + // We must collect the LRTQ shares so we can recompute the LRTQ rack + // secret. + let op = CoordinatorOperation::CollectLrtqShares { + collected_lrtq_shares: BTreeMap::from([( + msg.coordinator_id().clone(), + LrtqShare::new( + ctx.persistent_state() + .lrtq + .as_ref() + .expect("lrtq config exists") + .share + .clone(), + ), + )]), + new_shares, + }; + + Ok(CoordinatorState { + log, + msg: CoordinatingMsg::Upgrade(msg), + configuration, + op, + }) + } + // Intentionally private! // // The public constructors `new_uninitialized` and `new_reconfiguration` are @@ -163,15 +221,15 @@ impl CoordinatorState { ) -> CoordinatorState { CoordinatorState { log: log.new(o!("component" => "tq-coordinator-state")), - reconfigure_msg, + msg: CoordinatingMsg::Reconfig(reconfigure_msg), configuration, op, } } - /// Return the `ValidatedReconfigureMsg` that started this reconfiguration - pub fn reconfigure_msg(&self) -> &ValidatedReconfigureMsg { - &self.reconfigure_msg + /// Return the validated msg that started this reconfiguration + pub fn msg(&self) -> &CoordinatingMsg { + &self.msg } pub fn op(&self) -> &CoordinatorOperation { @@ -196,9 +254,9 @@ impl CoordinatorState { .expect("config exists") .members .keys() - .filter(|&m| { - !old_collected_shares.contains_key(m) - && ctx.connected().contains(m) + .filter(|&id| { + !old_collected_shares.contains_key(id) + && ctx.connected().contains(id) }) .cloned() .collect(); @@ -206,8 +264,24 @@ impl CoordinatorState { ctx.send(to, PeerMsgKind::GetShare(*old_epoch)); } } - #[expect(unused)] - CoordinatorOperation::CollectLrtqShares { members, shares } => {} + CoordinatorOperation::CollectLrtqShares { + collected_lrtq_shares, + .. + } => { + let destinations: Vec<_> = self + .configuration + .members + .keys() + .filter(|&id| { + !collected_lrtq_shares.contains_key(id) + && ctx.connected().contains(id) + }) + .cloned() + .collect(); + for to in destinations { + ctx.send(to, PeerMsgKind::GetLrtqShare); + } + } CoordinatorOperation::Prepare { prepares, .. 
} => { for (platform_id, (config, share)) in prepares.iter() { if ctx.connected().contains(&platform_id) { @@ -225,8 +299,6 @@ impl CoordinatorState { } // Send any required messages to a newly connected node - // This method is "in progress" - allow unused parameters for now - #[expect(unused)] pub fn send_msgs_to( &mut self, ctx: &mut impl NodeHandlerCtx, @@ -250,8 +322,18 @@ impl CoordinatorState { ctx.send(to, PeerMsgKind::GetShare(*old_epoch)); } } - CoordinatorOperation::CollectLrtqShares { members, shares } => {} - CoordinatorOperation::Prepare { prepares, prepare_acks } => { + CoordinatorOperation::CollectLrtqShares { + collected_lrtq_shares, + .. + } => { + if !collected_lrtq_shares.contains_key(&to) + && ctx.connected().contains(&to) + && self.configuration.members.contains_key(&to) + { + ctx.send(to, PeerMsgKind::GetLrtqShare); + } + } + CoordinatorOperation::Prepare { prepares, .. } => { if let Some((config, share)) = prepares.get(&to) { ctx.send( to, @@ -289,7 +371,7 @@ impl CoordinatorState { prepare_acks.insert(from); } op => { - warn!( + info!( self.log, "Ack received when coordinator is not preparing"; "op" => op.name(), @@ -370,27 +452,6 @@ impl CoordinatorState { } }; - // Reconstruct the new rack secret from the shares we created - // at coordination start time. - let shares: Vec<_> = new_shares.values().cloned().collect(); - let new_rack_secret = match RackSecret::reconstruct(&shares) { - Ok(secret) => { - info!( - log, - "Successfully reconstructed new rack secret" - ); - secret - } - Err(err) => { - error!( - log, - "Failed to reconstruct new rack secret"; - &err - ); - return; - } - }; - // Decrypt the encrypted rack secrets from the old config so // that we can add `old_rack_secret` to that set for use in the // new configuration. @@ -413,81 +474,238 @@ impl CoordinatorState { }; plaintext_secrets.insert(*old_epoch, old_rack_secret); - // Now encrypt the set of old rack secrets with the new rack - // secret. - let new_encrypted_rack_secrets = match plaintext_secrets - .encrypt( - self.configuration.rack_id, - new_epoch, - &new_rack_secret, - ) { - Ok(ciphertext) => ciphertext, - Err(_) => { - error!(log, "Failed to encrypt plaintext rack secrets"); - return; - } - }; - - // Save the encrypted rack secrets in the current configuration - // - // A new configuration is always created with a `None` value - // for `encrypted_rack_secrets`, as it gets filled in here. - // - // If we change that it's a programmer error that will be caught - // immediately by our tests. - assert!(self.configuration.encrypted_rack_secrets.is_none()); - self.configuration.encrypted_rack_secrets = - Some(new_encrypted_rack_secrets); - // Take `new_shares` out of `self.op` so we can include them in // `Prepare` messages; - let mut new_shares = mem::take(new_shares); - - // Update our persistent state - // - // We remove ourself because we don't send a `Prepare` message - // to ourself. - // - // SAFETY: our share already exists at this point and has been - // validated as part of the `Configuration` construction. 
- let share = new_shares - .remove(ctx.platform_id()) - .expect("my share exists"); - ctx.update_persistent_state(|ps| { - ps.shares.insert(new_epoch, share); - ps.configs - .insert_unique(self.configuration.clone()) - .expect("no existing configuration"); - true - }); - - // Now transition to `CoordinatorOperation::Prepare` - let prepares: BTreeMap<_, _> = new_shares - .into_iter() - .map(|(id, share)| { - (id, (self.configuration.clone(), share)) + let new_shares = mem::take(new_shares); + + // Start Preparing + self.transition_to_preparing( + ctx, + log, + new_shares, + plaintext_secrets, + ); + } + op => { + info!( + self.log, + "Share received when coordinator is not expecting it"; + "op" => op.name(), + "epoch" => %epoch, + "from" => %from + ); + } + } + } + + pub fn handle_lrtq_share( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + share: LrtqShare, + ) { + match &mut self.op { + CoordinatorOperation::CollectLrtqShares { + collected_lrtq_shares, + new_shares, + } => { + let log = self.log.new(o!( + "last_committed_epoch" => "lrtq", + "new_epoch" => self.configuration.epoch.to_string() + )); + + // Do we already have a share from this node? + if collected_lrtq_shares.contains_key(&from) { + return; + } + + // SAFETY: If we are collecting LRTQ shares it means we have + // `LrtqShareData` in our `PersistentState`. + let lrtq_share_data = ctx + .persistent_state() + .lrtq + .as_ref() + .expect("lrtq share data exists"); + + // We don't know which share digest corresponds to which node + // and so we check to ensure that the digest of the share is one + // of the valid digests we expect. + let digest = share.digest().into(); + if !lrtq_share_data.share_digests.contains(&digest) { + error!( + log, + "Received LrtqShare with invalid digest"; + "from" => %from + ); + return; + } + + // We double check to ensure that a different node didn't send + // the same share. This should never happen, or LRTQ would be + // busted on existing racks, but we sanity check here anyway. + if let Some((matching_id, _)) = + collected_lrtq_shares.iter().find(|(_, s)| { + let computed: bootstore::Sha3_256Digest = + s.digest().into(); + computed == digest }) + { + error!( + log, + "Received share with digest matching another node"; + "from" => %from, + "matching_node" => %matching_id + ); + return; + } + + // We have a new, unique, LRTQ share. Save it. + collected_lrtq_shares.insert(from, share); + + // Do we have enough shares to recompute the LRTQ rack secret? 
+ if collected_lrtq_shares.len() + < lrtq_share_data.threshold as usize + { + return; + } + + // Reconstruct the LRTQ rack secret + let shares: Vec<_> = collected_lrtq_shares + .values() + .map(|s| s.inner().clone()) .collect(); - self.op = CoordinatorOperation::Prepare { - prepares, - // Always include ourself - prepare_acks: BTreeSet::from([ctx.platform_id().clone()]), - }; + let lrtq_rack_secret: ReconstructedRackSecret = + match LrtqRackSecret::combine_shares(&shares) { + Ok(secret) => { + info!( + log, + "Successfully reconstructed LRTQ rack secret" + ); + secret.into() + } + Err(err) => { + error!( + self.log, + "Failed to reconstruct LRTQ rack secret"; + "err" => err.to_string() + ); + return; + } + }; + + // There are no old encrytped rack secrets for LRTQ + // LRTQ is always at Epoch 1 + let mut plaintext_secrets = PlaintextRackSecrets::new(); + plaintext_secrets.insert(Epoch(1), lrtq_rack_secret); - info!(log, "Starting to prepare after collecting shares"); - self.send_msgs(ctx); + // Take `new_shares` out of `self.op` so we can include them in + // `Prepare` messages; + let new_shares = mem::take(new_shares); + + // Start Preparing + self.transition_to_preparing( + ctx, + log, + new_shares, + plaintext_secrets, + ); } op => { - warn!( + info!( self.log, - "Share received when coordinator is not expecting it"; + "LrtqShare received when coordinator is not expecting it"; "op" => op.name(), - "epoch" => %epoch, "from" => %from ); } } } + + // Make the jump from collecting shares or lrtq shares to sending out + // prepare messages and waiting for acks. + fn transition_to_preparing( + &mut self, + ctx: &mut impl NodeHandlerCtx, + log: Logger, + mut new_shares: BTreeMap, + plaintext_secrets: PlaintextRackSecrets, + ) { + let new_epoch = self.configuration.epoch; + + // Reconstruct the new rack secret from the shares we created + // at coordination start time. + let shares: Vec<_> = new_shares.values().cloned().collect(); + let new_rack_secret = match RackSecret::reconstruct(&shares) { + Ok(secret) => { + info!(log, "Successfully reconstructed new rack secret"); + secret + } + Err(err) => { + error!( + log, + "Failed to reconstruct new rack secret"; + &err + ); + return; + } + }; + + // Now encrypt the set of old rack secrets with the new rack + // secret. + let new_encrypted_rack_secrets = match plaintext_secrets.encrypt( + self.configuration.rack_id, + new_epoch, + &new_rack_secret, + ) { + Ok(ciphertext) => ciphertext, + Err(_) => { + error!(log, "Failed to encrypt plaintext rack secrets"); + return; + } + }; + + // Save the encrypted rack secrets in the current configuration + // + // A new configuration is always created with a `None` value + // for `encrypted_rack_secrets`, as it gets filled in here. + // + // If we change that it's a programmer error that will be caught + // immediately by our tests. + assert!(self.configuration.encrypted_rack_secrets.is_none()); + self.configuration.encrypted_rack_secrets = + Some(new_encrypted_rack_secrets); + + // Update our persistent state + // + // We remove ourself because we don't send a `Prepare` message + // to ourself. + // + // SAFETY: our share already exists at this point and has been + // validated as part of the `Configuration` construction. 
+ let share = + new_shares.remove(ctx.platform_id()).expect("my share exists"); + ctx.update_persistent_state(|ps| { + ps.shares.insert(new_epoch, share); + ps.configs + .insert_unique(self.configuration.clone()) + .expect("no existing configuration"); + true + }); + + // Now transition to `CoordinatorOperation::Prepare` + let prepares: BTreeMap<_, _> = new_shares + .into_iter() + .map(|(id, share)| (id, (self.configuration.clone(), share))) + .collect(); + self.op = CoordinatorOperation::Prepare { + prepares, + // Always include ourself + prepare_acks: BTreeSet::from([ctx.platform_id().clone()]), + }; + + info!(log, "Starting to prepare after collecting shares"); + self.send_msgs(ctx); + } } /// What should the coordinator be doing? @@ -502,11 +720,12 @@ pub enum CoordinatorOperation { // until we get to `CoordinatorOperation::Prepare` new_shares: BTreeMap, }, - // We haven't started implementing this yet - // Epoch is always 0 CollectLrtqShares { - members: BTreeMap, - shares: BTreeMap, + collected_lrtq_shares: BTreeMap, + + // These are new shares that the coordinator created that we carry along + // until we get to `CoordinatorOperation::Prepare` + new_shares: BTreeMap, }, Prepare { /// The set of Prepares to send to each node diff --git a/trust-quorum/src/crypto.rs b/trust-quorum/src/crypto.rs index 0361e0654bc..8227bdef5b8 100644 --- a/trust-quorum/src/crypto.rs +++ b/trust-quorum/src/crypto.rs @@ -67,6 +67,10 @@ impl LrtqShare { Sha3_256::digest(&self.0).as_slice().try_into().unwrap(), )) } + + pub fn inner(&self) -> &Vec { + &self.0 + } } #[derive( @@ -89,6 +93,12 @@ impl std::fmt::Debug for Sha3_256Digest { } } +impl From for bootstore::Sha3_256Digest { + fn from(value: ShareDigestLrtq) -> Self { + bootstore::Sha3_256Digest::new(value.0.0) + } +} + /// A boxed array containing rack secret data /// /// We explicitly choose to box the data so that it is not littered around diff --git a/trust-quorum/src/lib.rs b/trust-quorum/src/lib.rs index 7d95539b583..d7259c6385a 100644 --- a/trust-quorum/src/lib.rs +++ b/trust-quorum/src/lib.rs @@ -29,10 +29,13 @@ mod rack_secret_loader; mod validators; pub use configuration::Configuration; pub use coordinator_state::{ - CoordinatorOperation, CoordinatorState, CoordinatorStateDiff, + CoordinatingMsg, CoordinatorOperation, CoordinatorState, + CoordinatorStateDiff, }; pub use rack_secret_loader::{LoadRackSecretError, RackSecretLoaderDiff}; -pub use validators::ValidatedReconfigureMsgDiff; +pub use validators::{ + ValidatedLrtqUpgradeMsgDiff, ValidatedReconfigureMsgDiff, +}; mod alarm; pub use alarm::Alarm; diff --git a/trust-quorum/src/messages.rs b/trust-quorum/src/messages.rs index 0d502bdcc50..fc8e3235255 100644 --- a/trust-quorum/src/messages.rs +++ b/trust-quorum/src/messages.rs @@ -22,6 +22,20 @@ pub struct ReconfigureMsg { pub threshold: Threshold, } +/// A request from nexus informing a node to start coordinating an upgrade from +/// LRTQ +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct LrtqUpgradeMsg { + pub rack_id: RackUuid, + pub epoch: Epoch, + // The members of the LRTQ cluster must be the same as the members of the + // upgraded trust quorum cluster. This is implicit, as the membership of the + // LRTQ cluster is computed based on the existing control plane sleds known + // to Nexus. 
+ pub members: BTreeSet, + pub threshold: Threshold, +} + /// Messages sent between trust quorum members over a sprockets channel #[derive(Debug, Clone, Serialize, Deserialize)] #[cfg_attr(feature = "danger_partial_eq_ct_wrapper", derive(PartialEq, Eq))] @@ -50,14 +64,6 @@ pub enum PeerMsgKind { /// Acknowledge a successful prepare from a coordinator PrepareAck(Epoch), - /// Retrieve a configuration for a given epoch from a node. Nodes only - /// respond if this is the current configuration and the requesting node is - /// a member of the configuration. - GetConfig(Epoch), - - /// A configuration returned in response to `GetConfig` - Config(Configuration), - /// Request a node's key share for the given epoch from that node GetShare(Epoch), @@ -67,7 +73,7 @@ pub enum PeerMsgKind { share: Share, }, - // LRTQ shares are always at epoch 0 + // LRTQ shares are always at epoch 1 GetLrtqShare, LrtqShare(LrtqShare), @@ -90,8 +96,6 @@ impl PeerMsgKind { match self { Self::Prepare { .. } => "prepare", Self::PrepareAck(_) => "prepare_ack", - Self::GetConfig(_) => "get_config", - Self::Config(_) => "config", Self::GetShare(_) => "get_share", Self::Share { .. } => "share", Self::GetLrtqShare => "get_lrtq_share", @@ -110,9 +114,6 @@ impl PeerMsgKind { Self::Prepare { config: config1, .. }, Self::Prepare { config: config2, .. }, ) => config1.equal_except_for_crypto_data(config2), - (Self::Config(config1), Self::Config(config2)) => { - config1.equal_except_for_crypto_data(config2) - } ( Self::Share { epoch: epoch1, .. }, Self::Share { epoch: epoch2, .. }, diff --git a/trust-quorum/src/node.rs b/trust-quorum/src/node.rs index 7871165e5b7..1ce2a4f22fd 100644 --- a/trust-quorum/src/node.rs +++ b/trust-quorum/src/node.rs @@ -16,12 +16,14 @@ //! Node, and so this should not be problematic. use crate::compute_key_share::KeyShareComputer; -use crate::crypto::ReconstructedRackSecret; +use crate::coordinator_state::CoordinatingMsg; +use crate::crypto::{LrtqShare, ReconstructedRackSecret}; use crate::rack_secret_loader::{ LoadRackSecretError, RackSecretLoader, RackSecretLoaderDiff, }; use crate::validators::{ - MismatchedRackIdError, ReconfigurationError, ValidatedReconfigureMsg, + LrtqUpgradeError, MismatchedRackIdError, ReconfigurationError, + ValidatedLrtqUpgradeMsg, ValidatedReconfigureMsg, }; use crate::{ Alarm, Configuration, CoordinatorState, Epoch, ExpungedMetadata, @@ -125,12 +127,25 @@ impl Node { ctx: &mut impl NodeHandlerCtx, msg: ReconfigureMsg, ) -> Result<(), ReconfigurationError> { + let last_reconfig_msg = if let Some(cs) = &self.coordinator_state { + match cs.msg() { + CoordinatingMsg::Upgrade(_) => { + return Err( + ReconfigurationError::UpgradeFromLrtqInProgress, + ); + } + CoordinatingMsg::Reconfig(msg) => Some(msg), + } + } else { + None + }; + let Some(validated_msg) = ValidatedReconfigureMsg::new( &self.log, ctx.platform_id(), msg, ctx.persistent_state().into(), - self.coordinator_state.as_ref().map(|cs| cs.reconfigure_msg()), + last_reconfig_msg, )? else { // This was an idempotent (duplicate) request. 
@@ -159,6 +174,39 @@ impl Node { Ok(()) } + /// Start coordinating an upgrade from LRTQ + pub fn coordinate_upgrade_from_lrtq( + &mut self, + ctx: &mut impl NodeHandlerCtx, + msg: LrtqUpgradeMsg, + ) -> Result<(), LrtqUpgradeError> { + let validated_msg = ValidatedLrtqUpgradeMsg::new(&self.log, ctx, msg)?; + if let Some(kcs) = &self.key_share_computer { + // We know from our `ValidatedLrtqUpgradeMsg` that we haven't seen a + // newer configuration Therefore if we are computing a key share, we + // must be doing it for a stale commit and should cancel it. + // + // I don't think it's actually possible to hit this condition, but + // we check anyway. + info!( + self.log, + "Upgrade from LRTQ started. Cancelling key share compute"; + "reconfiguration_epoch" => %validated_msg.epoch(), + "key_share_compute_epoch" => %kcs.config().epoch + ); + self.key_share_computer = None; + } + + self.coordinator_state = Some(CoordinatorState::new_upgrade_from_lrtq( + &self.log, + ctx, + validated_msg, + )?); + self.send_coordinator_msgs(ctx); + + Ok(()) + } + pub fn is_computing_key_share(&self) -> bool { self.key_share_computer.is_some() } @@ -303,7 +351,7 @@ impl Node { // Are we currently coordinating for this epoch? // Stop coordinating if we are. if let Some(cs) = &self.coordinator_state { - if cs.reconfigure_msg().epoch() == epoch { + if cs.msg().epoch() == epoch { info!( self.log, "Stopping coordination due to commit"; @@ -389,9 +437,12 @@ impl Node { PeerMsgKind::Expunged(epoch) => { self.handle_expunged(ctx, from, epoch); } - _ => todo!( - "cannot handle message variant yet - not implemented: {msg:?}" - ), + PeerMsgKind::GetLrtqShare => { + self.handle_get_lrtq_share(ctx, from); + } + PeerMsgKind::LrtqShare(share) => { + self.handle_lrtq_share(ctx, from, share); + } } } @@ -403,7 +454,7 @@ impl Node { fn handle_prepare_ack(&mut self, from: PlatformId, epoch: Epoch) { // Are we coordinating for this epoch? if let Some(cs) = &mut self.coordinator_state { - let current_epoch = cs.reconfigure_msg().epoch(); + let current_epoch = cs.msg().epoch(); if current_epoch == epoch { info!(self.log, "Received prepare ack"; "from" => %from, @@ -592,7 +643,7 @@ impl Node { } if let Some(cs) = &self.coordinator_state { - let coordinating_epoch = cs.reconfigure_msg().epoch(); + let coordinating_epoch = cs.msg().epoch(); // Are we coordinating for an older epoch? If so, cancel. if coordinating_epoch < config.epoch { @@ -853,7 +904,7 @@ impl Node { // Nexus. In either case the rest of the system has moved on and we // should stop coordinating. if let Some(cs) = &self.coordinator_state { - if msg_epoch > cs.reconfigure_msg().epoch() { + if msg_epoch > cs.msg().epoch() { // This prepare is for a newer configuration than the one we are // currently coordinating. We must cancel our coordination as Nexus // has moved on. @@ -863,7 +914,7 @@ impl Node { ); info!(self.log, "{cancel_msg}"; "msg_epoch" => %msg_epoch, - "epoch" => %cs.reconfigure_msg().epoch(), + "epoch" => %cs.msg().epoch(), "from" => %from ); self.coordinator_state = None; @@ -874,6 +925,81 @@ impl Node { ctx.send(from, PeerMsgKind::PrepareAck(msg_epoch)); } + fn handle_get_lrtq_share( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + ) { + // Have we already committed a TQ config? 
+ if let Some(latest_committed_config) = + ctx.persistent_state().latest_committed_configuration() + { + if !latest_committed_config.members.contains_key(&from) { + info!( + self.log, + "Received a GetLrtqShare message from expunged node"; + "from" => %from, + "latest_committed_epoch" => + %latest_committed_config.epoch, + ); + ctx.send( + from, + PeerMsgKind::Expunged(latest_committed_config.epoch), + ); + return; + } + info!( + self.log, + concat!( + "Received 'GetLrtqShare' from stale node. ", + "Responded with 'CommitAdvance'." + ); + "from" => %from, + "latest_committed_epoch" => %latest_committed_config.epoch, + ); + ctx.send( + from, + PeerMsgKind::CommitAdvance(latest_committed_config.clone()), + ); + return; + } + + // Do we have the LRTQ share? + // + // We always return an LRTQ share to anyone who asks if we have it. This + // matches the LRTQ protocol. + if let Some(lrtq_share_data) = &ctx.persistent_state().lrtq { + info!( + self.log, + "Received 'GetLrtqShare'. Responded with 'LrtqShare'."; + "from" => %from, + ); + ctx.send( + from, + PeerMsgKind::LrtqShare(LrtqShare::new( + lrtq_share_data.share.clone(), + )), + ); + } else { + warn!( + self.log, + "Received 'GetLrtqShare', but it's missing."; + "from" => %from, + ); + } + } + + fn handle_lrtq_share( + &mut self, + ctx: &mut impl NodeHandlerCtx, + from: PlatformId, + share: LrtqShare, + ) { + if let Some(cs) = &mut self.coordinator_state { + cs.handle_lrtq_share(ctx, from.clone(), share.clone()); + } + } + // Send any required messages as a reconfiguration coordinator fn send_coordinator_msgs(&mut self, ctx: &mut impl NodeHandlerCtx) { // This function is called unconditionally in `tick` callbacks. In this @@ -906,12 +1032,10 @@ impl Node { ctx: &mut impl NodeHandlerCtx, msg: ValidatedReconfigureMsg, ) -> Result<(), ReconfigurationError> { - let log = self.log.new(o!("component" => "tq-coordinator-state")); - // We have no committed configuration or lrtq ledger if ctx.persistent_state().is_uninitialized() { let (coordinator_state, my_config, my_share) = - CoordinatorState::new_uninitialized(log, msg)?; + CoordinatorState::new_uninitialized(&self.log, msg)?; self.coordinator_state = Some(coordinator_state); ctx.update_persistent_state(move |ps| { ps.shares.insert(my_config.epoch, my_share); @@ -929,7 +1053,7 @@ impl Node { .expect("committed configuration exists"); self.coordinator_state = Some(CoordinatorState::new_reconfiguration( - log, + &self.log, msg, config, our_share.clone(), diff --git a/trust-quorum/src/node_ctx.rs b/trust-quorum/src/node_ctx.rs index c312a6daf9d..9e70063bc9f 100644 --- a/trust-quorum/src/node_ctx.rs +++ b/trust-quorum/src/node_ctx.rs @@ -134,6 +134,20 @@ impl NodeCtx { } } + pub fn new_with_persistent_state( + platform_id: PlatformId, + persistent_state: PersistentState, + ) -> NodeCtx { + NodeCtx { + platform_id, + persistent_state, + persistent_state_changed: false, + outgoing: Vec::new(), + connected: BTreeSet::new(), + alarms: BTreeSet::new(), + } + } + #[cfg(any(test, feature = "testing"))] pub fn clear_mutable_state(&mut self) { self.persistent_state_changed = false; diff --git a/trust-quorum/src/persistent_state.rs b/trust-quorum/src/persistent_state.rs index 28435de15db..ef63b5441aa 100644 --- a/trust-quorum/src/persistent_state.rs +++ b/trust-quorum/src/persistent_state.rs @@ -48,6 +48,16 @@ impl PersistentState { } } + pub fn new_lrtq_only(lrtq: LrtqShareData) -> PersistentState { + PersistentState { + lrtq: Some(lrtq), + configs: IdOrdMap::new(), + shares: BTreeMap::new(), + commits: 
BTreeSet::new(), + expunged: None, + } + } + pub fn rack_id(&self) -> Option { self.latest_config().map(|c| c.rack_id).or_else(|| { self.lrtq diff --git a/trust-quorum/src/validators.rs b/trust-quorum/src/validators.rs index ffa361dc1f2..28bfa70b4d6 100644 --- a/trust-quorum/src/validators.rs +++ b/trust-quorum/src/validators.rs @@ -4,9 +4,12 @@ //! Various validation functions to be used by a [`crate::Node`] -use crate::configuration::ConfigurationError; +use crate::configuration::{ConfigurationError, NewConfigParams}; use crate::messages::ReconfigureMsg; -use crate::{Epoch, PersistentStateSummary, PlatformId, Threshold}; +use crate::{ + Epoch, LrtqUpgradeMsg, NodeHandlerCtx, PersistentStateSummary, PlatformId, + Threshold, +}; use daft::{BTreeSetDiff, Diffable, Leaf}; use omicron_uuid_kinds::RackUuid; use slog::{Logger, error, info, warn}; @@ -69,8 +72,12 @@ pub enum ReconfigurationError { #[error("upgrade from LRTQ required")] UpgradeFromLrtqRequired, + #[error("upgrade from LRTQ in progress")] + UpgradeFromLrtqInProgress, + #[error( - "number of members: {num_members:?} must be greater than threshold: {threshold:?}" + "number of members: {num_members:?} must be greater than threshold: \ + {threshold:?}" )] ThresholdMismatch { num_members: usize, threshold: Threshold }, @@ -85,7 +92,8 @@ pub enum ReconfigurationError { InvalidThreshold(Threshold), #[error( - "Node has last committed epoch of {node_epoch:?}, message contains {msg_epoch:?}" + "Node has last committed epoch of {node_epoch:?}, \ + message contains {msg_epoch:?}" )] LastCommittedEpochMismatch { node_epoch: Option, @@ -93,7 +101,8 @@ pub enum ReconfigurationError { }, #[error( - "sled has already prepared a request at epoch {existing:?}, and cannot prepare another at a smaller or equivalent epoch {new:?}" + "sled has already prepared a request at epoch {existing:?}, \ + and cannot prepare another at a smaller or equivalent epoch {new:?}" )] PreparedEpochMismatch { existing: Epoch, new: Epoch }, @@ -111,7 +120,8 @@ pub enum ReconfigurationError { SledExpungedError, ), #[error( - "reconfiguration in progress at epoch {current_epoch:?}: cannot reconfigure for older epoch {msg_epoch:?}" + "reconfiguration in progress at epoch {current_epoch:?}: cannot \ + reconfigure for older epoch {msg_epoch:?}" )] ReconfigurationInProgress { current_epoch: Epoch, msg_epoch: Epoch }, @@ -122,6 +132,68 @@ pub enum ReconfigurationError { Configuration(#[from] ConfigurationError), } +#[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)] +pub enum LrtqUpgradeError { + #[error("invalid rack id")] + InvalidRackId( + #[from] + #[source] + MismatchedRackIdError, + ), + + #[error("cannot commit: expunged at epoch {epoch} by {from}")] + Expunged { epoch: Epoch, from: PlatformId }, + + #[error("not an lrtq node - no lrtq key share")] + NoLrtqShare, + + #[error("already upgraded from lrtq: committed epoch {0}")] + AlreadyUpgraded(Epoch), + + #[error("reconfiguration coordinator must be a member of the new group")] + CoordinatorMustBeAMemberOfNewGroup, + + #[error( + "number of members: {num_members:?} must be greater than threshold: \ + {threshold:?}" + )] + ThresholdMismatch { num_members: usize, threshold: Threshold }, + + #[error( + "invalid membership size: {0:?}: must be between 3 and 32 inclusive" + )] + InvalidMembershipSize(usize), + + #[error( + "invalid threshold: {0:?}: threshold must be between 2 and 31 inclusive" + )] + InvalidThreshold(Threshold), + + #[error( + "sled has already prepared a request at epoch {existing:?}, \ + and cannot 
prepare another at a smaller or equivalent epoch {new:?}" + )] + PreparedEpochMismatch { existing: Epoch, new: Epoch }, + + #[error("epoch must be at least 2 as the LRTQ epoch is 1. got {0}")] + EpochMustBeAtLeast2(Epoch), + + #[error(transparent)] + Configuration(#[from] ConfigurationError), +} + +impl<'a> From<&'a ValidatedReconfigureMsg> for NewConfigParams<'a> { + fn from(value: &'a ValidatedReconfigureMsg) -> Self { + Self { + rack_id: value.rack_id, + epoch: value.epoch, + members: &value.members, + threshold: value.threshold, + coordinator_id: &value.coordinator_id, + } + } +} + /// A `ReconfigureMsg` that has been determined to be valid for the remainder /// of code paths. We encode this check into a type in a "parse, don't validate" /// manner. @@ -389,6 +461,191 @@ impl ValidatedReconfigureMsg { } } +impl<'a> From<&'a ValidatedLrtqUpgradeMsg> for NewConfigParams<'a> { + fn from(value: &'a ValidatedLrtqUpgradeMsg) -> Self { + Self { + rack_id: value.rack_id, + epoch: value.epoch, + members: &value.members, + threshold: value.threshold, + coordinator_id: &value.coordinator_id, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Diffable)] +pub struct ValidatedLrtqUpgradeMsg { + rack_id: RackUuid, + epoch: Epoch, + members: BTreeSet, + threshold: Threshold, + + // This is not included in the original `LrtqUpgradeMsg`. It's implicit in + // the node that Nexus sends the request to. + coordinator_id: PlatformId, +} + +impl ValidatedLrtqUpgradeMsg { + /// Ensure that the `LrtqUpgradeMsg` is valid and return a + /// `ValidatedLrtqUpgradeMsg` if it is. + /// + /// LRTQ upgrade does not accept idempotent requests. If a configuration has + /// been seen for a given epoch, then an error is returned. TODO: This might + /// be the right behavior for normal reconfigurations as well. Nexus is + /// not going to send a request more than once for the same epoch. For now + /// though, we leave things as is. 
+ pub fn new( + log: &Logger, + ctx: &mut impl NodeHandlerCtx, + msg: LrtqUpgradeMsg, + ) -> Result { + let ps = ctx.persistent_state(); + + if let Some(expunged) = &ps.expunged { + error!( + log, + "LRTQ upgrade attempted on expunged node"; + "expunged_epoch" => %expunged.epoch, + "expunging_node" => %expunged.from + ); + return Err(LrtqUpgradeError::Expunged { + epoch: expunged.epoch, + from: expunged.from.clone(), + }); + } + + // If we have an LRTQ share, the rack id must match the one from Nexus + if let Some(ps_rack_id) = ps.rack_id() { + if msg.rack_id != ps_rack_id { + error!( + log, + "LRTQ upgrade attempted with invalid rack_id"; + "expected" => %ps_rack_id, + "got" => %msg.rack_id + ); + return Err(MismatchedRackIdError { + expected: ps_rack_id, + got: msg.rack_id, + } + .into()); + } + } + + if ps.lrtq.is_none() { + error!(log, "LRTQ upgrade attempted on node without LRTQ share"); + return Err(LrtqUpgradeError::NoLrtqShare); + } + + if let Some(epoch) = ps.latest_committed_epoch() { + error!( + log, + "LRTQ upgrade attempted when already upgraded"; + "committed_epoch" => %epoch + ); + return Err(LrtqUpgradeError::AlreadyUpgraded(epoch)); + } + + if !msg.members.contains(ctx.platform_id()) { + return Err(LrtqUpgradeError::CoordinatorMustBeAMemberOfNewGroup); + } + + Self::check_membership_sizes(&msg)?; + Self::check_epoch(ctx, &msg)?; + + let LrtqUpgradeMsg { rack_id, epoch, members, threshold } = msg; + + Ok(ValidatedLrtqUpgradeMsg { + rack_id, + epoch, + members, + threshold, + coordinator_id: ctx.platform_id().clone(), + }) + } + + pub fn epoch(&self) -> Epoch { + self.epoch + } + + pub fn coordinator_id(&self) -> &PlatformId { + &self.coordinator_id + } + + /// Verify that the cluster membership and threshold sizes are within + /// constraints. + /// + /// This is essentially a copy of the method for `ValidatedReconfigureMsg`, + /// but with different types. + fn check_membership_sizes( + msg: &LrtqUpgradeMsg, + ) -> Result<(), LrtqUpgradeError> { + let num_members = msg.members.len(); + if num_members <= msg.threshold.0 as usize { + return Err(LrtqUpgradeError::ThresholdMismatch { + num_members, + threshold: msg.threshold, + }); + } + + if num_members < 3 || num_members > 32 { + return Err(LrtqUpgradeError::InvalidMembershipSize(num_members)); + } + + if msg.threshold.0 < 2 || msg.threshold.0 > 31 { + return Err(LrtqUpgradeError::InvalidThreshold(msg.threshold)); + } + + Ok(()) + } + + // Ensure that the epoch for this LRTQ upgrade is valid + fn check_epoch( + ctx: &mut impl NodeHandlerCtx, + msg: &LrtqUpgradeMsg, + ) -> Result<(), LrtqUpgradeError> { + // Epochs for LRTQ upgrades must start at 2, as the LRTQ epoch is always 1. + if msg.epoch < Epoch(2) { + return Err(LrtqUpgradeError::EpochMustBeAtLeast2(msg.epoch)); + } + + // Ensure that we haven't seen a newer configuration + if let Some(latest_config) = ctx.persistent_state().latest_config() { + if msg.epoch <= latest_config.epoch { + return Err(LrtqUpgradeError::PreparedEpochMismatch { + existing: latest_config.epoch, + new: msg.epoch, + }); + } + } + + Ok(()) + } +} + +// For diffs we want to allow access to all fields, but not make them public in +// the `ValidatedLrtqUpgradeMsg` type itself. 
+impl<'daft> ValidatedLrtqUpgradeMsgDiff<'daft> { + pub fn rack_id(&self) -> Leaf<&RackUuid> { + self.rack_id + } + + pub fn epoch(&self) -> Leaf<&Epoch> { + self.epoch + } + + pub fn members(&self) -> &BTreeSetDiff<'daft, PlatformId> { + &self.members + } + + pub fn threshold(&self) -> Leaf<&Threshold> { + self.threshold + } + + pub fn coordinator_id(&self) -> Leaf<&PlatformId> { + self.coordinator_id + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/trust-quorum/test-utils/Cargo.toml b/trust-quorum/test-utils/Cargo.toml index 0e20b7c8466..33181dc1ddf 100644 --- a/trust-quorum/test-utils/Cargo.toml +++ b/trust-quorum/test-utils/Cargo.toml @@ -7,14 +7,17 @@ edition = "2024" workspace = true [dependencies] +bootstore.workspace = true camino.workspace = true daft.workspace = true dropshot.workspace = true gfss = { workspace = true, features = ["danger_partial_eq_ct_wrapper"] } iddqd.workspace = true omicron-uuid-kinds.workspace = true +secrecy.workspace = true serde.workspace = true serde_json.workspace = true +sled-hardware-types.workspace = true slog.workspace = true trust-quorum = { workspace = true, features = ["danger_partial_eq_ct_wrapper", "testing"] } diff --git a/trust-quorum/test-utils/src/event.rs b/trust-quorum/test-utils/src/event.rs index 89c7c93bacc..9748cc2f947 100644 --- a/trust-quorum/test-utils/src/event.rs +++ b/trust-quorum/test-utils/src/event.rs @@ -20,6 +20,10 @@ pub enum Event { config: NexusConfig, crashed_nodes: BTreeSet, }, + InitialSetupLrtq { + member_universe_size: usize, + config: NexusConfig, + }, AbortConfiguration(Epoch), SendNexusReplyOnUnderlay(NexusReply), /// Call `Node::handle` with the given Envelope. @@ -33,6 +37,7 @@ pub enum Event { DeliverNexusReply(NexusReply), CommitConfiguration(PlatformId), Reconfigure(NexusConfig), + LrtqUpgrade(NexusConfig), CrashNode(PlatformId), RestartNode { id: PlatformId, @@ -48,6 +53,9 @@ impl Event { Self::InitialSetup { config, crashed_nodes, .. } => { config.members.union(&crashed_nodes).cloned().collect() } + Self::InitialSetupLrtq { config, .. } => { + config.members.iter().cloned().collect() + } Self::AbortConfiguration(_) => vec![], Self::SendNexusReplyOnUnderlay(_) => vec![], Self::DeliverEnvelope(envelope) => vec![envelope.to.clone()], @@ -56,6 +64,7 @@ impl Event { Self::ClearSecrets(id) => vec![id.clone()], Self::CommitConfiguration(id) => vec![id.clone()], Self::Reconfigure(_) => vec![], + Self::LrtqUpgrade(_) => vec![], Self::CrashNode(id) => vec![id.clone()], Self::RestartNode { id, connection_order } => { let mut nodes = connection_order.clone(); diff --git a/trust-quorum/test-utils/src/nexus.rs b/trust-quorum/test-utils/src/nexus.rs index 4ee353bf704..da766049077 100644 --- a/trust-quorum/test-utils/src/nexus.rs +++ b/trust-quorum/test-utils/src/nexus.rs @@ -10,7 +10,9 @@ use iddqd::{IdOrdItem, IdOrdMap, id_upcast}; use omicron_uuid_kinds::RackUuid; use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; -use trust_quorum::{Epoch, PlatformId, ReconfigureMsg, Threshold}; +use trust_quorum::{ + Epoch, LrtqUpgradeMsg, PlatformId, ReconfigureMsg, Threshold, +}; // The operational state of nexus for a given configuration #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Diffable)] @@ -18,6 +20,7 @@ pub enum NexusOp { Committed, Aborted, Preparing, + LrtqCommitted, } /// A single nexus configuration @@ -74,6 +77,28 @@ impl NexusConfig { } } + // An LRTQ config can only be the initial config. + // + // We create it so that we can test upgrading out of it. 
+ pub fn new_lrtq( + coordinator: PlatformId, + members: BTreeSet, + ) -> NexusConfig { + let threshold = Threshold((members.len() / 2 + 1) as u8); + NexusConfig { + // We start committed, since we aren't actually running the LRTQ protocol + op: NexusOp::LrtqCommitted, + epoch: Epoch(1), + last_committed_epoch: None, + coordinator, + members, + threshold, + commit_crash_tolerance: 0, + prepared_members: BTreeSet::new(), + committed_members: BTreeSet::new(), + } + } + pub fn to_reconfigure_msg(&self, rack_id: RackUuid) -> ReconfigureMsg { ReconfigureMsg { rack_id, @@ -84,6 +109,15 @@ impl NexusConfig { } } + pub fn to_lrtq_upgrade_msg(&self, rack_id: RackUuid) -> LrtqUpgradeMsg { + LrtqUpgradeMsg { + rack_id, + epoch: self.epoch, + members: self.members.clone(), + threshold: self.threshold, + } + } + // Are there enough prepared members to commit? pub fn can_commit(&self) -> bool { self.prepared_members.len() @@ -126,6 +160,14 @@ impl NexusState { (&config.coordinator, config.to_reconfigure_msg(self.rack_id)) } + // Create an `LrtqUpgradeMsg` for the latest nexus config + pub fn lrtq_upgrade_msg_for_latest_config( + &self, + ) -> (&PlatformId, LrtqUpgradeMsg) { + let config = self.configs.iter().last().expect("at least one config"); + (&config.coordinator, config.to_lrtq_upgrade_msg(self.rack_id)) + } + pub fn latest_config(&self) -> &NexusConfig { self.configs.iter().last().expect("at least one config") } @@ -146,6 +188,19 @@ impl NexusState { } found } + + pub fn needs_upgrade_from_lrtq(&self) -> bool { + // If we don't have any committed TQ configurations and + // the configuration for epoch 1 is LRTQ then we must + // call `Node::coordinate_upgrade_from_lrtq` rather than + // `Node::coordinate_reconfiguration`. + self.last_committed_config().is_none() + && self + .configs + .get(&Epoch(1)) + .map(|c| c.op == NexusOp::LrtqCommitted) + .unwrap_or(false) + } } #[derive( diff --git a/trust-quorum/test-utils/src/state.rs b/trust-quorum/test-utils/src/state.rs index cc02c4663d1..2479f9e60d7 100644 --- a/trust-quorum/test-utils/src/state.rs +++ b/trust-quorum/test-utils/src/state.rs @@ -8,16 +8,20 @@ use crate::nexus::{ NexusConfig, NexusOp, NexusReply, NexusState, NexusStateDiff, }; use crate::{Event, member_universe}; +use bootstore::schemes::v0::SharePkgCommon; use daft::{BTreeMapDiff, BTreeSetDiff, Diffable, Leaf}; use iddqd::IdOrdMap; +use omicron_uuid_kinds::GenericUuid; +use secrecy::ExposeSecretMut; +use sled_hardware_types::Baseboard; use slog::{Logger, info}; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::Display; use trust_quorum::{ - Configuration, CoordinatorOperation, CoordinatorStateDiff, Envelope, Epoch, - LoadRackSecretError, Node, NodeCallerCtx, NodeCommonCtx, NodeCtx, - NodeCtxDiff, NodeDiff, PeerMsgKind, PlatformId, - ValidatedReconfigureMsgDiff, + Configuration, CoordinatingMsg, CoordinatorOperation, CoordinatorStateDiff, + Envelope, Epoch, LoadRackSecretError, Node, NodeCallerCtx, NodeCommonCtx, + NodeCtx, NodeCtxDiff, NodeDiff, PeerMsgKind, PersistentState, PlatformId, + ValidatedLrtqUpgradeMsgDiff, ValidatedReconfigureMsgDiff, }; // The state of our entire system including the system under test and @@ -86,8 +90,6 @@ impl TqState { } /// Send the latest `ReconfigureMsg` from `Nexus` to the coordinator node - /// - /// If the node is not available, then abort the configuration at nexus pub fn send_reconfigure_msg(&mut self) { let (coordinator, msg) = self.nexus.reconfigure_msg_for_latest_config(); let epoch_to_config = msg.epoch; @@ -117,6 +119,22 @@ impl 
TqState { } } + /// Send the latest `LrtqUpgradeMsg` from nexus to the coordinator node + pub fn send_lrtq_upgrade_msg(&mut self) { + let (coordinator, msg) = + self.nexus.lrtq_upgrade_msg_for_latest_config(); + if !self.crashed_nodes.contains(coordinator) { + let (node, ctx) = self + .sut + .nodes + .get_mut(coordinator) + .expect("coordinator exists"); + + node.coordinate_upgrade_from_lrtq(ctx, msg) + .expect("valid configuration"); + } + } + /// Check postcondition assertions after initial configuration pub fn postcondition_initial_configuration(&mut self) { let (coordinator, msg) = self.nexus.reconfigure_msg_for_latest_config(); @@ -199,6 +217,12 @@ impl TqState { crashed_nodes, ); } + Event::InitialSetupLrtq { member_universe_size, config } => { + self.apply_event_initial_config_lrtq( + member_universe_size, + config, + ); + } Event::AbortConfiguration(epoch) => { self.apply_event_abort_configuration(epoch) } @@ -223,6 +247,9 @@ impl TqState { Event::Reconfigure(nexus_config) => { self.apply_event_reconfigure(nexus_config) } + Event::LrtqUpgrade(nexus_config) => { + self.apply_event_lrtq_upgrade(nexus_config) + } Event::CrashNode(id) => { self.apply_event_crash_node(id); } @@ -276,6 +303,60 @@ impl TqState { self.send_envelopes_from_coordinator(); } + fn apply_event_initial_config_lrtq( + &mut self, + member_universe_size: usize, + config: NexusConfig, + ) { + // Generate the member universe + self.member_universe = member_universe(member_universe_size); + + // Translate `PlatformId`s to `Baseboards`s for LRTQ membership + let baseboards: BTreeSet<_> = config + .members + .iter() + .cloned() + .map(|id| { + Baseboard::new_pc( + id.serial_number().to_string(), + id.part_number().to_string(), + ) + }) + .collect(); + + // Create the LRTQ key share packages and take only the common data, + // which is what we use for trust quorum upgrade. 
+ let share_pkgs = config + .members + .iter() + .cloned() + .zip( + bootstore::schemes::v0::create_pkgs( + self.nexus.rack_id.into_untyped_uuid(), + baseboards.clone(), + ) + .unwrap() + .expose_secret_mut() + .iter() + .map(|pkg| pkg.common.clone()), + ) + .collect(); + + // Create the SUT nodes + self.sut = + Sut::new_lrtq(&self.log, self.member_universe.clone(), share_pkgs); + + // Inform nexus about the initial configuration + self.nexus.configs.insert_unique(config).expect("new config"); + + // Establish bootstrap network connections between all nodes + for (from, (node, ctx)) in self.sut.nodes.iter_mut() { + for to in self.member_universe.iter().filter(|id| from != *id) { + node.on_connect(ctx, to.clone()); + } + } + } + fn apply_event_commit(&mut self, id: PlatformId) { let rack_id = self.nexus.rack_id; let latest_config = self.nexus.latest_config(); @@ -354,7 +435,9 @@ impl TqState { ); } Err(LoadRackSecretError::NoCommittedConfigurations) => { - assert!(ctx.persistent_state().is_uninitialized()); + assert!( + ctx.persistent_state().latest_committed_epoch().is_none() + ); } Err(LoadRackSecretError::NotCommitted(epoch)) => { assert!(!ctx.persistent_state().commits.contains(&epoch)); @@ -543,6 +626,12 @@ impl TqState { self.send_envelopes_from_coordinator(); } + fn apply_event_lrtq_upgrade(&mut self, nexus_config: NexusConfig) { + self.nexus.configs.insert_unique(nexus_config).expect("new config"); + self.send_lrtq_upgrade_msg(); + self.send_envelopes_from_coordinator(); + } + // Commit at nexus when preparing fn nexus_commit(&mut self) { let mut latest_config = self.nexus.latest_config_mut(); @@ -625,6 +714,29 @@ impl Sut { .collect(); Sut { nodes } } + + pub fn new_lrtq( + log: &Logger, + universe: Vec, + mut share_pkgs: BTreeMap, + ) -> Sut { + // Populate the persistent state of each member in the LRTQ cluster + // with a share pkg + let nodes = universe + .into_iter() + .map(|id| { + let mut ctx = if let Some(pkg) = share_pkgs.remove(&id) { + let ps = PersistentState::new_lrtq_only(pkg); + NodeCtx::new_with_persistent_state(id.clone(), ps) + } else { + NodeCtx::new(id.clone()) + }; + let node = Node::new(log, &mut ctx); + (id, (node, ctx)) + }) + .collect(); + Sut { nodes } + } } /***************************************************************************** @@ -871,23 +983,13 @@ fn display_node_diff( writeln!( f, " started coordinating at epoch {}", - node_diff - .coordinator_state() - .after - .unwrap() - .reconfigure_msg() - .epoch() + node_diff.coordinator_state().after.unwrap().msg().epoch() )?; } else if node_diff.coordinator_state().after.is_none() { writeln!( f, " stopped coordinating at epoch {}", - node_diff - .coordinator_state() - .before - .unwrap() - .reconfigure_msg() - .epoch() + node_diff.coordinator_state().before.unwrap().msg().epoch() )?; } else { let before = node_diff.coordinator_state().before.unwrap(); @@ -945,7 +1047,25 @@ pub fn display_coordinator_state_diff( diff: CoordinatorStateDiff<'_>, f: &mut std::fmt::Formatter<'_>, ) -> std::fmt::Result { - display_validated_reconfigure_msg_diff(diff.reconfigure_msg(), f)?; + match (diff.msg().before, diff.msg().after) { + (CoordinatingMsg::Reconfig(a), CoordinatingMsg::Reconfig(b)) => { + display_validated_reconfigure_msg_diff(a.diff(b), f)?; + } + (CoordinatingMsg::Upgrade(a), CoordinatingMsg::Upgrade(b)) => { + display_validated_lrtq_upgrade_msg_diff(a.diff(b), f)?; + } + (CoordinatingMsg::Reconfig(_), CoordinatingMsg::Upgrade(_)) => { + panic!("Cannot go from reconfiguring to LRTQ upgrade"); + } + 
(CoordinatingMsg::Upgrade(a), CoordinatingMsg::Reconfig(b)) => { + writeln!( + f, + " Went from LRTQ upgrade to Reconfig: epoch: {} -> {}", + a.epoch(), + b.epoch() + )?; + } + } // Configuration contains roughly the same information as a // `ValidatedReconfigureMsg`. Let's report the only relevant change. @@ -959,7 +1079,7 @@ pub fn display_coordinator_state_diff( } pub fn display_validated_reconfigure_msg_diff( - diff: &ValidatedReconfigureMsgDiff<'_>, + diff: ValidatedReconfigureMsgDiff<'_>, f: &mut std::fmt::Formatter<'_>, ) -> std::fmt::Result { // diff.rack_id changes when tqdb `rewind` command is used, which makes it @@ -1011,6 +1131,51 @@ pub fn display_validated_reconfigure_msg_diff( Ok(()) } +pub fn display_validated_lrtq_upgrade_msg_diff( + diff: ValidatedLrtqUpgradeMsgDiff, + f: &mut std::fmt::Formatter<'_>, +) -> std::fmt::Result { + // diff.rack_id changes when tqdb `rewind` command is used, which makes it + // confusing. It never changes inside tests, so no need to diff it. + if diff.epoch().is_modified() { + writeln!( + f, + " epoch: {} -> {}", + diff.epoch().before, + diff.epoch().after + )?; + } + if !diff.members().added.is_empty() { + writeln!(f, " added members:")?; + for member in &diff.members().added { + writeln!(f, " {member}")?; + } + } + if !diff.members().removed.is_empty() { + writeln!(f, " removed members:")?; + for member in &diff.members().removed { + writeln!(f, " {member}")?; + } + } + if diff.threshold().is_modified() { + writeln!( + f, + " threshold: {} -> {}", + diff.threshold().before, + diff.threshold().after + )?; + } + // Always write out the coordinator id. It's useful for digging. + writeln!( + f, + " coordinator: {} -> {}", + diff.coordinator_id().before, + diff.coordinator_id().after, + )?; + + Ok(()) +} + pub fn display_coordinator_operation_diff( diff: Leaf<&CoordinatorOperation>, f: &mut std::fmt::Formatter<'_>, @@ -1050,8 +1215,14 @@ pub fn display_coordinator_operation_diff( } } ( - CoordinatorOperation::CollectLrtqShares { shares: before, .. }, - CoordinatorOperation::CollectLrtqShares { shares: after, .. }, + CoordinatorOperation::CollectLrtqShares { + collected_lrtq_shares: before, + .. + }, + CoordinatorOperation::CollectLrtqShares { + collected_lrtq_shares: after, + .. 
+ }, ) => { if before != after { writeln!(f, " collected lrtq shares differ")?; diff --git a/trust-quorum/tests/cluster.proptest-regressions b/trust-quorum/tests/cluster.proptest-regressions index d432e0f97f8..d28b10d011a 100644 --- a/trust-quorum/tests/cluster.proptest-regressions +++ b/trust-quorum/tests/cluster.proptest-regressions @@ -13,3 +13,4 @@ cc 72be7c5cebd3603aadfd2d88416a2b1e822ac7e86a44a9f5956a17a5b6a7c334 # shrinks to cc cd2b5981d4029117a8f650570bf67917328a1e85e1a0dff7b333cb3b3ec267ec # shrinks to input = _TestTrustQuorumProtocolArgs { input: TestInput { initial_config: GeneratedConfiguration { members: {0, 2, 4, 6, 8, 12, 14}, threshold: Index(4922076049451636973) }, initial_down_nodes: {5, 6, 9, 11}, actions: [LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17051577634505825172), Index(3831015544086811756), Index(16890911626444384577), Index(9665680836313868673), Index(12291299896308727247), Index(11842813514802256094), Index(4206698851704481185), Index(17239392660685696156), Index(8108826513344288983), Index(995555394547547943), Index(4862537611771439422), 
Index(7103770413416081865), Index(7666197506136050605), Index(10313438350388663906), Index(9799229200806910805)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(7052354899929269634)), Commit(Index(12754593430247573957)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(7098496255410508935)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(7064460230614137187), Index(15061612349683899297), Index(3369326351677890631), Index(14114770909359522054), Index(2725203562132149837), Index(5289341536801950173), Index(10620641296938553550), Index(4676050646368714346), Index(14971715550224699023), Index(15241567877008668480), Index(4046398187268318006), Index(164277420218539909), Index(9187949102836217441), Index(133602391934601715), Index(2747993447963367731)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(2149743622211910352), Index(13876625335597459653), Index(12267756793048541210), Index(6987825774047098593), Index(17049126207458697896), Index(14665751046296640405), Index(3677402988930638291), Index(3346711794499282174), Index(15492569502319594109), Index(15451994600841200045), Index(14588991772583008258), Index(4333856395358031753), Index(9763436209078247837), 
Index(16887231443740957681), Index(6010677374404110198)] }, DeliverNexusReply, Commit(Index(3327363521485328095)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16447934661473974938)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17730505644207223797)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8712313740743754486)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6264584198079306473)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(7175077154395615146)), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(101186827837765703), Index(17517206090751605003), Index(1322824823493258153), Index(15167508973938988133), Index(17891906801475768918), Index(3111371597580388946), Index(12279097487601739012), Index(16343225343032683932), Index(17173499143055322582), Index(1144855841348396100), Index(5506132012443977573), Index(2203629787744437870), Index(11634062102205121486), Index(6233601287893775315), Index(8926347667196942995)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7174220848789458878)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, Commit(Index(11954793656920760763)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9196277561863114407)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, Commit(Index(10448865765267154681)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 
}), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16233102013938229427), Index(15517535268399227068), Index(4850234934282258115), Index(12135279507100737496), Index(13873862192653516951), Index(3174748510347129021), Index(1728918049804157265), Index(1451862753275199203), Index(10704432659342936030), Index(7192195786121903338), Index(16108582134752161689), Index(4207122711860152508), Index(10964913360211477584), Index(15175793703605743221), Index(2884439556706535360)] }, Commit(Index(17115700878157918178)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4171602340395337631)), Commit(Index(8368303780513306101)), Commit(Index(7695114181351836049)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(4679372184395745508), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(2267563778139139851)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(12788066934602729878)), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(14173576280053807566)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10634761796247405721)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } 
}) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10282072849713701802), Index(5947601229998026984), Index(5152298920412573714), Index(10683952147876856836), Index(16453290930182093045), Index(6905785950394404962), Index(6404520298562606349), Index(3999866652959092855), Index(6693239171277296196), Index(3747997142585910304), Index(11732603487319006951), Index(10790316400056217689), Index(15669803336698314898), Index(9876501373025146057), Index(17883110763827518228)] }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14870358054653239963)), Commit(Index(16825118473595723264)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17031762881990045509)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16382293306095495811)), Commit(Index(6087707168071737559)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(1432238778963191264)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10433983532626434493)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12299141931584957678)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(2980372941386579479), Index(11499316717877659421), Index(17666611416295413900), Index(1706762805924846356), Index(1695654521629890730), Index(15144896380800379216), Index(17216745583685560774), Index(17379728704288921804), Index(16514245884343446186), Index(12868596633778996287), Index(6944884607028676838), Index(12834280356516960744), Index(12753064801603036564), Index(14987823822359945792), Index(2931769106841458776)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4841797463498010315)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(12822495199898565512)), DeliverNexusReply, DeliverNexusReply, Commit(Index(5563195759908570006)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16242443110239879164), Index(11786391714420221339), Index(5544998565372014821), Index(10810895014332503470), Index(7060138458049070593), Index(9167784885439911903), Index(9146125065988473107), Index(5394399841765472715), Index(217344787918345443), Index(12232397562619453865), Index(5697343310757616302), Index(9725754547221987605), Index(8195461235968704697), Index(11993519760641901531), Index(13513490530112829188)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8059720552996654969)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17179395512027282938)), DeliverNexusReply, Commit(Index(5880368375546964225)), DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13015427150187339913), Index(14125085474738109667), Index(16121752336432923205), Index(12819425392947159729), Index(12736790604732727373), Index(15769037642628613866), Index(10018712401691295359), Index(2631650625698798889), Index(2725984402676214707), Index(7732996496276653531), Index(12536700867668013499), Index(11461505568950390933), Index(16313956737186888253), Index(13421775173101756868), Index(18166542693212148355)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16639034686070453675)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1681184024199223286)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17319842111584966471)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, Commit(Index(14192692437521822641)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7383318225056786580)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2019580864964819018)), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(1700840490885557878)), Commit(Index(16975997317231806745)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10650051016973543748)), Commit(Index(12452593774586707019)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7476937302333349162)), Commit(Index(2412525153349107653)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(10036523030711719197)), Commit(Index(8682608472812529490)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18101611655811196727)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(5186016773498569774), Index(132532083153160447), Index(17810286002717543469), Index(14277753432417005720), Index(15593440890792468543), Index(1156050194482155282), Index(18028601213385766078), Index(194252493695099155), Index(2679178555643088418), Index(11853726572322713264), Index(7258262588366646826), Index(6897102514752591414), Index(13339097464131300386), Index(10424516247375452150), Index(7193058486290482930)] }, PollPrepareAcks, Commit(Index(1412529891067244651)), Commit(Index(4047791368697623749)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15056106359419808605)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13230408158989636690), Index(233875710952601816), Index(15261708690740688031), Index(683465460544675814), Index(1628910701307916351), Index(3571885322355244733), Index(13250802668656604682), Index(11612797159308862936), Index(17620586467583527078), 
Index(4457252278275803458), Index(8023856985282651352), Index(3506061455240584828), Index(401684046567582621), Index(2505356381232598292), Index(1364087778174231353)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 3, removed_nodes: [], threshold: Index(8187046744586550677), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, Commit(Index(2937757642577398104)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17950094175408090163)), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4618633886116975573)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12624420278898205313)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4177990725557858017)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(3019281181724435239), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(13807300905840639959), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(16530773166540691377)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore 
{}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4870194832221295646), Index(1363580812781228068), Index(17110551817792475219), Index(11435524437885038780), Index(4033592148502742805), Index(12946173963652273315), Index(6549186305417294979), Index(14143093099286295437), Index(1404523355062555763), Index(17953450642991675828), Index(14258345573119745151), Index(10498951104431172001), Index(13587786547914815630), Index(8202300286441660485), Index(7288474758064071125)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(2361089447035562838)), Commit(Index(9267295574628462173)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(8123568850588588453)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10695732873790710081), Index(9839392904429549496), Index(7131851238346132996), Index(17560638496806781352), Index(17713745800252388060), Index(17364209985670492928), Index(2289714096417994843), Index(133513604553714170), Index(17113193907055287424), Index(16736407992791278622), Index(7277547167163124694), Index(14130032182854221965), Index(13119822580677839738), Index(11095508136719220012), Index(4390013733973048866)] }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(824306625778284496)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 
64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14629498707385828653), Index(7006555700622216631), Index(18041755403059946691), Index(1250443925193459691), Index(4253275313529233324), Index(12175537879183477548), Index(5828805464841220963), Index(7901448698250598269), Index(4751209514792230062), Index(8162847003661456277), Index(13546391332711705224), Index(13477751777043819246), Index(8005415724266127661), Index(5427271158954742689), Index(12710465633020803687)] }, PollPrepareAcks, Commit(Index(9677970125207266594)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(1657119859510662750)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11547376774784751562)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(3962881759619885068)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15858251392397180199)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(490869607664210062)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(9865886852320544626), Index(13430654084406998787), Index(15548218822116576762), Index(583321479251467799), Index(3281244705166530319), Index(8653321919566072479), Index(11440243044071660013), Index(10227886161460846539), Index(5039870352778481174), Index(17743989601631985672), Index(14842090655257016206), Index(3487367414444269276), Index(3552562951007140970), Index(17363713992031314443), Index(14466730776790792168)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11646051174854747055)), DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17199582805898314812)), Commit(Index(9073526438776693187)), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore 
{}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15975827634315564737)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8197477490788839111)), Commit(Index(14530235219676753460)), DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15616103519845618715)), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 1, removed_nodes: [], threshold: Index(7162323291192172547), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(3972361357583473697)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(16275913925876225182), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, Commit(Index(4197051869709144239)), Commit(Index(3468393008533451478)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(10181478905158959854)), PollPrepareAcks, DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(15779334003328403726)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(8591683930050853112), Index(7744923939876411274), Index(4411641040551614829), Index(8431832707238306111), Index(3715336501155699616), Index(5232759279149515960), Index(3368871001707029403), Index(744650282004479689), Index(6931445563188487168), Index(9204799676196131940), Index(1277817853084666648), Index(17968419916423554251), Index(1089059600954861792), Index(14886768989806333543), Index(4380473274526372038)] }, PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5104159162291010518)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 
0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(812118670453958213)), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(13625840138587159640), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [], threshold: Index(5377244991436450603), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(689707717768049870)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7430204133433226052)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14156221436216220148)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16490686062880952495)), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Reconfigure { num_added_nodes: 3, removed_nodes: [], threshold: Index(2366218314010672085), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1834941384916128295)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, 
Commit(Index(6202942864433254534)), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(9602044939604628006)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3734837473068939900)), PollPrepareAcks, Commit(Index(3301756193020854695)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(6599150313944062977), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(5952924621171053901)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { 
rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14131957494060192184), Index(11340918572145316313), Index(12461082044764980040), Index(7772345818617811523), Index(16259544179605909725), Index(10842195740829430023), Index(13581764643375544994), Index(57752700841281921), Index(14012289914224984000), Index(10827330589305942731), Index(230999292110167485), Index(1512383515779132010), Index(2338608541149556283), Index(8772692902412063202), Index(12110177091293683122)] }, PollPrepareAcks, Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(6028637123621942904), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(12925916855182217604)), DeliverEnvelope(Selector { rng: TestRng 
{ rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17679559364516950727), Index(10658851231864160316), Index(9511320859296210231), Index(10398075050085432907), Index(7252535444180724993), Index(2529257840014543616), Index(1574743271117465603), Index(17371737079110568232), Index(6162334040592398889), Index(860854455807221672), Index(4356162313998209905), Index(4271720407365340920), Index(9226866134197584402), Index(17598712919195356559), Index(10663135083251968961)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(1745074793952622470)), PollPrepareAcks, Commit(Index(6378775523425968782)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3316058171440047416)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17188110789543099477)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17767423552026381542)), PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10733440019164713240)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10363992686064633994)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4744152509584477349)), Commit(Index(3556907620443580103)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(905100905381939051)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14996853237077750348), Index(4678446063427465251), Index(7560023464230976812), Index(8976757141544537377), Index(17914919155001548823), Index(10957565787611013364), Index(6064952955042436892), Index(174670626774750033), Index(16777671116792072220), Index(6432199933481497733), Index(1267526569814315456), Index(12086913749718857172), Index(14799196189762668374), Index(14861960545562601798), Index(9470469121302702396)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 2, removed_nodes: [], threshold: Index(7909056460517134793), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15832577660388163596)), DeliverNexusReply, Commit(Index(17686227315912518779)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11310462913291337143)), Commit(Index(4121411838601482050)), Commit(Index(11087027695242367620)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(3637203987479958233)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10971213536806683444)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { 
rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(8988459050636666179)), Commit(Index(10582630922030989908)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17461714755185114147), Index(13020952303951379335), Index(11232565586081316915), Index(1244583948400582520), Index(5705104754842606074), Index(2154104231746125758), Index(6121794624320839062), Index(14793579746106595229), Index(15736659202050887638), Index(10525226381864210095), Index(5190154888761854255), Index(3239142960923335387), Index(448101930271986746), Index(2640723897558794050), Index(9719787769384959716)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 
}), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11337667084508129157)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(167052824055177625)), PollPrepareAcks, DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(11160934652183200065), Index(2084227564724952126), Index(9818220769432984046), Index(3574221108476356297), Index(17273079891665311323), Index(14403119174919765511), Index(7189914664245388687), Index(3871789853245834314), Index(12135283788501975497), Index(7549956165917354823), Index(145281709432801009), Index(2289543025843484579), Index(7138399324154539582), Index(16924902409779711020), Index(9820556657264080911)] }, Commit(Index(12258651016382338553)), Commit(Index(3895562504131697769)), Commit(Index(10793137473030493621)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16310513143649523084)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(12641316420250763654), Index(17975979270382110434), Index(7969488033185255286), Index(9691551252332613642), Index(7584974703258936897), Index(15021663946065735833), Index(1758712350977764172), Index(14342333619971092230), Index(4792449003741762364), Index(1136549237574737665), Index(10565603900057000493), Index(14821585321272739931), Index(10198910864770028171), Index(17222133201323436445), Index(11111355870814234687)] }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { 
rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(12742147637641018798)), Commit(Index(6957641080389416876)), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(313776582786143703)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1664121866990271272)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(11659575867485009345)), Commit(Index(18067011543374573881)), Commit(Index(11885377546410699621)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(13753780910168417227)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 
64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5148574437729757334)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(8796326199216900888), Index(13392737084561559503), Index(1262572027688511092), Index(3876589451564473911), Index(1333123572683279204), Index(11259168888428464625), Index(5021752053544058732), Index(14987327626964295015), Index(15424062316362649270), Index(9827129957979563989), Index(5595913552271613123), Index(9300959864932439478), Index(2100744948758373424), Index(16359571486529762176), Index(9969183398881659876)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { 
rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(18214702891238200362)), PollPrepareAcks, Commit(Index(6703391561123670049)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1956846826019877210)), PollPrepareAcks, Commit(Index(17018377695673107941)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(857191511468954398), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(2502085428058301316), Index(17576543230127436364), Index(6241310278819475541), Index(1075974156471829200), Index(6436787676373328454), Index(8414510703386561905), Index(11108872544162713621), Index(18160553755501499005), Index(9825656053245321406), Index(2941168764172333357), Index(10300363443471457449), Index(9500064545533465260), Index(9192968020536100876), Index(9559148243381515571), Index(3476666947945761877)] }, DeliverNexusReply, Commit(Index(5025003416557316740)), DeliverNexusReply, DeliverNexusReply, Commit(Index(10554519239409899070)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(15359822900691079218), Index(7475733851503630841), Index(5413253096674264164), Index(205930021387981785), Index(10532242808082630697), Index(1592234622374005420), Index(825566843304345289), Index(2143512977012276400), Index(1890444646702764353), Index(7067236113193251304), Index(11365056931157803850), Index(5175479985009796885), Index(4734600699647466620), Index(197503596529236461), Index(7732850365897400598)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16328346398726451979)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3352855820482450442)), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(10403760662093762868), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3755262502306292296)), PollPrepareAcks, Commit(Index(15528476257646666429)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14915118703454641349), Index(12606135932861309293), Index(11645487045892541182), Index(9200051187323140673), Index(8556136118127597692), Index(17729888266364213311), Index(6477890378707099211), Index(14239646971150598180), Index(13618123726457667765), Index(12236193089278002404), Index(4248861295414349782), Index(3213083520730321225), Index(13606460370118745100), Index(1620038485956106114), Index(18442355481290404709)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3303325794170457376)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14724817520773391893)), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1814329411362539216)), DeliverNexusReply, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(1076989487855234859)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10318206573415247197)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18033601967237509168)), Commit(Index(1957633013818606131)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15491232609596259352)), Commit(Index(14829514142510922089)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9168570811245162140)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17559199460634196955)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(7918350360797422553), Index(16289507273077572149), Index(147189753956489544), Index(10386023087425729252), Index(12128084643005911159), Index(1094203925942081556), Index(9317683642392619141), Index(1789715873006474562), Index(8260274405116202169), Index(12950680051412833701), Index(6233210823697123496), Index(8647355079261009991), Index(1509960630923911186), Index(10548194008682659624), Index(1425681426674433136)] }, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(326566271626747449), Index(9745296941656660043), Index(18215168046550692780), Index(10529062673657556884), Index(14715307344249683118), Index(15769406383785252481), Index(16198445214892865508), Index(3500637954559395155), Index(13254208433835674922), Index(9624103807458146240), Index(12633010690516550767), Index(12420585405031437628), Index(4885870654439323288), Index(2955743237033383629), Index(17020057201522034514)] }, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3301938337204498694)), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12916005972050168552)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7045744188453296225)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(6998067642536819531)), 
RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(2514064917502790724), Index(6151577949469690111), Index(4734462965115633288), Index(16888307710238936687), Index(5539110532781003438), Index(6170365205005218903), Index(12767852141132498926), Index(2975466260267451303), Index(16660908705418341544), Index(11606385950329176109), Index(579113115265449342), Index(7726558772549778321), Index(806991652290845251), Index(2498949432733986105), Index(3589813191869529)] }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12098143581754102013)), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(1717015534442339917), Index(16653683761437725779), Index(10152252544908173997), Index(2578512925296422197), Index(1672845548734863437), Index(1969569938534672179), Index(2439223589084009532), Index(16531688463744127478), Index(17388972730038582287), Index(5540916303917082674), Index(16684272049798230387), Index(7786363584399347660), Index(10750412613688688940), Index(15678329412353574293), Index(1964795364309793455)] }, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 })] } } cc e7ac2d9b42a54d91a5b08e7a7bc4cd66b44de64de9fd65d609c37824e838e515 # shrinks to input = _TestTrustQuorumProtocolArgs { input: TestInput { initial_config: GeneratedConfiguration { members: {0, 5, 6, 9, 11, 12}, threshold: Index(6975541973862646459) }, initial_down_nodes: {9, 11, 14}, actions: [PollPrepareAcks, DeliverNexusReply, Commit(Index(3449265691084783935)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6061225873971120093)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16393576858640061667)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(3124325381894084311)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8382225591246172129)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18271296525174538173)), DeliverNexusReply, Commit(Index(12329610729821368674)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(1600576776432883088)), DeliverNexusReply, Commit(Index(12849327819257632801)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9530133982522428506)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17035749978939940515)), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(12218386085331474151)), DeliverNexusReply, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11380646841142017018)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16257096850826134354)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(4097112902917061776)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5665126095264159893)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1335894965329139286)), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 
}), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(9322861436621404243)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10571720810091891193)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(62710253388427333)), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(13621134109067574552)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(15173603104592623294), Index(16262718011393849906), Index(6430841770613985197), Index(170066684685862993), Index(16721354083583572418), Index(12103888720672096243), Index(13663864770379972853), Index(11514252650130555891), 
Index(7351678526905967564), Index(5240458157585107060), Index(9249839608010871405), Index(6013604896255063376), Index(7788244365903968299), Index(15797804184897305646), Index(4333305253375884879)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(13837320200074339077)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(7141478106587901063)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, Commit(Index(9586169847782724259)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12114608863118667802)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(14917860274045151093)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5232212871292791276)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6722252973572177232)), Commit(Index(9285196188389564719)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2593667497036216044)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4173956057468110016)), PollPrepareAcks, DeliverNexusReply, Commit(Index(4447514118767390565)), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(8411070044229944328), Index(11445510188804546854), Index(10862687232325596880), Index(9186874600468330232), Index(1250126240088930614), Index(5129704899929733248), Index(8791988487113258311), Index(17817261218651402170), Index(8068245385580142638), Index(14186159335563779017), Index(1349537999173073914), Index(14254096233480241061), Index(3549730279034358764), Index(11618734559319061870), Index(10293201394645927299)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(688860087227280671), Index(2884727634004305975), Index(9443377857531875868), Index(12227474399904361726), Index(7456128310110036700), Index(5370808737692842183), Index(7616570816049652214), Index(8865844005931872255), Index(14267583453770076446), Index(15895396641385881945), Index(13491607991542174928), Index(862808446483863020), Index(17982881104282900748), Index(15690481420055656674), Index(5143074275744953695)] }, PollPrepareAcks, Commit(Index(6100638463932339906)), 
PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, Commit(Index(3343302010193509747)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(12085699404435176221), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16278490011162968816), Index(13494453464432873139), Index(15753850473029776409), Index(7345191611531005530), Index(4414321418567668707), Index(6583860926425805247), Index(3473663861749307580), Index(14228981939028657379), Index(4206829439533854005), Index(330399219284144721), Index(9277054320873919575), Index(4696221695266556811), Index(12455970634625010676), Index(16841058441771436209), Index(12522660857415208541)] }, Commit(Index(7488359805271872620)), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11309478389539182433)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(15263363928408339849)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 1, removed_nodes: [], threshold: Index(13132559085031060053), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(8930501774695155492)), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(2116530057361087109)), Commit(Index(2670966180262409676)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(5863238604608974522)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11938175791251281025)), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(808493760324363617)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(2805816369938214125)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore 
{}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16194084757469366003), Index(3880563365604099385), Index(9105056881340370119), Index(3837655917690003182), Index(8774657645112233499), Index(16068265970386298981), Index(5230610501648673336), Index(5214236773694374002), Index(12342186074482591598), Index(3836868485036007320), Index(5917133813722902958), Index(9787133315689615850), Index(7647844121707928324), Index(6867923776219119553), Index(8106350700853802691)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17673940927645542021)), Commit(Index(10388894396697611650)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(12509604138465677903)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11913686765619030766)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(15538263146787205753)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16099443680192400285)), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10460954182596396792)), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18292708945116262015)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(11757407795912587538)), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10259546126870911541)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10848235306991533954)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(14996960338118560241)), Commit(Index(11318296214188657018)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16346349507233623032)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(12101053305118746356)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { 
rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11243549953456397525)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(143428120171892519), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8920169011708929954)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(3462892174432120814)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { 
rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(5288522001885601452), Index(5718235379552711435), Index(12720300849721893767), Index(14402958743541649679), Index(482698194275733167), Index(11796471905785274395), Index(11076402906665417420), Index(17889241483496171139), Index(6145541895624161987), Index(3769157242830167833), Index(3185340176760426316), Index(15282459985043046060), Index(700551744643342263), Index(8417234385633716711), Index(15614121719372044610)] }, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14215962995829639155)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 
}, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(3483295715060172553), Index(6681162242006526671), Index(17841078186095655882), Index(11111083509431951002), Index(9118036273790730326), Index(13410310111509877113), Index(11714203847492160192), Index(10271458929005211405), Index(968212868130094654), Index(4386008744692003563), Index(15923875318454968803), Index(14809090743573864608), Index(1800654864725491567), Index(2474420574247243668), Index(1844406631927436023)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15767019584064627798)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16035042783642828512)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9376159365817913427)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18320712367513059190)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(17893866043169473576)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(12123835370366520798), Index(3345117242727207667), Index(11160867041276952303), Index(5201049312404538586), Index(3424836733465381618), 
Index(16447729750750823628), Index(17900607986575542396), Index(7364331643192082459), Index(2669800635015956021), Index(17355319830337257995), Index(8066151712043752604), Index(4857707408190677028), Index(9389858457804819590), Index(4940303328791047127), Index(17067645759156018413)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11823066255652165898)), Commit(Index(16456929699775003622)), Commit(Index(6419462407982208688)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10681711627828756284)), Commit(Index(16396584097239869669)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 
64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1891576586780026104)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(129086001976195375)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8034897178868009682)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3601693383542639787)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(3725428982890078833)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14784397836936008182), Index(15451966996075385004), Index(13183809776828019772), Index(5016098719254654730), Index(16653772433295963445), Index(103032799336570809), Index(11439379044613001701), Index(2148673933742704926), Index(10925474400136249785), Index(13571010686229489870), Index(15347298388536205651), Index(6743528128857453194), Index(2912330834536284648), Index(6078786224185304388), Index(4932827559768652018)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5751652907303317562)), Commit(Index(13022312587349022913)), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17822080786546377794)), Commit(Index(15883123755999919366)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { 
rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(6888082033786639213)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17404010166725341446), Index(11469072048755577615), Index(13092279362221655677), Index(1801392937732101716), Index(8163692060105505431), Index(1486674680980016965), Index(14280282066592045010), Index(4979134271988864079), Index(9239769254675639888), Index(262370364906645658), Index(12636704478127279568), Index(17676863741364705210), Index(1051897062727493712), Index(5794294667936968797), Index(9145757636256562569)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10370330164180465744)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12768773152699854830)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12562168795591897410)), Commit(Index(5715669541494877957)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(11598242813074020636), Index(15302586065354399390), Index(4101370078261764252), Index(7434615342679643515), Index(13910701640076551594), Index(17110434849065624470), Index(11599891140480704066), Index(3821247303919859717), Index(7218055814087310289), Index(12203620944644174545), Index(5374755614121214620), Index(8861325875647162045), Index(7592286787362580976), Index(5284153346060763437), Index(14894366766134612042)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8368729420469866360)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(684009691804516545)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, Commit(Index(10776973189111810295)), PollPrepareAcks, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11878519568069744028)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(1848806844505345789)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, Commit(Index(11203981484140914192)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14781376455481736405)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), Commit(Index(2331347087737015470)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13261898331878620547)), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, Commit(Index(14275187976770905113)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(17594801695606375613)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(13055567031579047367)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13490839820177821662), Index(11586504777398012621), Index(5662651573905012992), Index(784793717778566714), Index(10998253002076414218), Index(15422492622329260760), Index(12804751865883458123), Index(17020675554548841416), Index(5847595520433605370), Index(14475310799166860290), Index(3739992582284095781), Index(3400975392670230070), Index(4264723057887152298), Index(367620596746770653), Index(5062687776191159467)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17218528242151843554), Index(14230509780972052405), Index(12193814106463918695), Index(10148467356212112136), Index(18130823950113688054), Index(12330403128185638874), Index(16775883331731912107), Index(11412369750923634611), Index(10286092878498966586), Index(892356149733349372), Index(5769600234500458650), Index(10107083399296256343), Index(10264391088644543020), Index(12423096411145726991), Index(2866186688024779907)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(3157128641536953616), Index(18068041379813254713), Index(11809699372097367120), Index(1650491046373339537), Index(224186956806767038), Index(794218076868625270), Index(12953184353138292706), Index(4715975611644281142), Index(4749815815882692126), Index(12359098820577358002), Index(5922774628847241086), Index(269238526522680116), Index(17611164481643247277), Index(2738785817564075193), Index(4186361941212158793)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) 
}, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(12351864864158105351)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4245365800300397437)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3483810370756161828)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2193251146995308215)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13813258135329433389)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2845058203041787088)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12132981663749727424)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13722098722050747429), Index(15428065792132948760), Index(5238669253544289885), Index(5847186619236070954), Index(3783460948847167925), Index(13376198617916617020), Index(11951991807447989688), Index(6988127236033921051), Index(5729437986775275774), Index(16626638479396279373), Index(14479078198354467268), Index(12380138417050751056), Index(14585981946212180568), Index(6387140619248606594), Index(6435374814984431826)] }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14228906501016745934)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } 
}, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4274900572724176047), Index(724464416140869698), Index(1836313910407818125), Index(13094408986649210856), Index(1502773343460259827), Index(13804992041747345857), Index(8689104649048875978), Index(12965798867094934239), Index(11805149552593010512), Index(15341686951844993419), Index(8141477551948766943), Index(2842675507060576196), Index(742054788571937265), Index(17438946945919500909), Index(16492144471261553860)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10417906268591644681), Index(3806498366860422704), Index(16139403446599785623), Index(2738758371122696287), Index(12374033758879448925), Index(17012065904267601409), Index(14408593968801779260), Index(11703511620970633601), Index(1881893436044138316), Index(2888502150486357527), Index(4137808690495713385), Index(4556530974174354157), Index(3868793467590210207), Index(13332701047687529712), Index(1845199060960556886)] }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4356656905306718822), Index(8816166438620791341), Index(2944305714457600003), Index(5616342318984027085), Index(17468896308719534154), Index(16800860550144897471), Index(15777604911998375562), Index(12079386534868310467), Index(8101623965590751274), Index(11130060090686562005), Index(4376728691243668232), Index(12200832561144218266), Index(12381770063840426070), Index(3988148949318956161), Index(9549369066028576065)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15024599822655559484)), Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(5717330354130703164), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, Commit(Index(6618715822852385561)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8595362668362275654)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17538987448242241884)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16248739036439180648), Index(10327747122831246063), Index(17101316485892786469), Index(4901152671323366997), Index(1996115392562718203), Index(7587043655493099865), Index(14411511655260207188), Index(2395908313999905554), Index(2820494700659854473), Index(2560214142581796868), Index(9500461415192437686), Index(3757095382969852518), Index(16688265657568958246), Index(16138201101548633493), Index(18038153094112888260)] }, Commit(Index(6592600314272832696)), Commit(Index(10162249225544422202)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13053136464429474999)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(14912092846975877918), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17303255253043999894), Index(18413422301693089271), Index(14766143846094150407), Index(13249652311027215064), Index(12301821974338001727), Index(1282440792048137538), Index(10345273427608457962), Index(9042613085329035555), Index(2001947248719962743), Index(14736249943859657226), Index(13533266861142548583), Index(4639775000304181858), Index(17150433105173549832), Index(3517186982131470096), Index(17464881751290528606)] }, DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(4484162692211574579)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17362255750918237732)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector 
{ rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9841214106247565561)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4943492353869023567), Index(15454792401481252109), Index(15495275587886812158), Index(14879873395966963562), Index(7409214541646280220), Index(14622927345063795180), Index(7835519232624784533), Index(12013540222840578738), Index(10284471100744647156), Index(9544147738743492318), Index(7226347742228977132), Index(18075291564366354494), Index(14014012887907603910), Index(12517865999984786567), Index(10455955678755798440)] }, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14157414531813710280), Index(9695919585688633593), Index(18277336187792655278), Index(5466996456738993271), Index(3274904727631254340), Index(4074377555818813687), Index(17677484100752223720), Index(17670651096570805512), Index(2558402893034442700), 
Index(786486155191329321), Index(16399079784404943325), Index(9032925626018423310), Index(708737820664570106), Index(3603382806724722233), Index(7357015718144041905)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7556638469881187351)), Commit(Index(4742792380916605019)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(445660088649798172)), Commit(Index(12256614484743268733)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8025987655125757744)), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(710446924200841539), Index(12828592243724088070), Index(13700936649788789706), Index(104768422922721328), Index(6593913314524024597), Index(3439211963030501060), Index(7988731184842899977), Index(6926202373277659222), Index(13225524779573741214), Index(14248197169265909967), Index(3321663540135412530), Index(4985217520743053797), Index(12352219377553634585), Index(693462037052914765), Index(1552868502422422500)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(10377531209020745645)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5587896599628038160)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, PollPrepareAcks, Reconfigure { num_added_nodes: 0, removed_nodes: [], threshold: Index(15847578153599494997), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(8605502204857675532)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(13676100624980186983)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6360374148639654415)), Commit(Index(3126784281242806397)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(3997694510490813015), Index(12622369194634640040), Index(12116733881088804434), Index(2321267170953747269), Index(10325295472670712543), Index(15675526509950475834), Index(7647392048604936894), Index(545907449613045236), Index(6265350617066305007), Index(1513165137108391408), Index(1676975666074233677), Index(12733072016090420210), Index(11335949311573660860), Index(12970107136401947919), Index(6048609259961482598)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(12642918994476525815)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, Commit(Index(8572083895658628989)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8606640289595244384)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2885302051336759188)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3864716323350113630)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2105970480986299798)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17226466699733400566)), Commit(Index(11157005828117463451)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17040383356465398787)), Commit(Index(50516977122028399)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13023905894138959083)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(436719101889981109), Index(131432516149160629), Index(7129003382136369254), Index(3426659622997635547), Index(14190350911656254690), Index(11651248932850080447), Index(14381752635831453399), Index(12027674968302207158), Index(5813873221098249425), Index(8874121428376577617), Index(471955022852810452), Index(8428283463443277442), Index(12052866783507187803), Index(4661496883867320083), Index(15294709198838453736)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2293293827917574303)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(18215731741824126716)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10175245912702921143), Index(9729477768044050832), Index(11912193929088993023), Index(14555446047069205704), Index(14695430727695787692), Index(13884937129145110671), Index(1870808899709294651), Index(4526082178479847600), Index(10897136569076673711), Index(4651033902832416260), Index(6024567287747117015), Index(10005009623249419802), Index(10997590389497509906), Index(9747647954874856418), Index(2195494487798375952)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(176216439995207384)), Commit(Index(11530012217922849825)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, Commit(Index(783981163027538985)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9676223217563092451)), DeliverNexusReply, Commit(Index(3158271435420181762)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12410741219313109822)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(6358869747368776033), Index(3223130370645278885), Index(17818867872494516526), Index(12204124542995689208), Index(9226772866179133680), Index(3407743069527666770), Index(14345387163912949675), Index(11790539588730926565), Index(12111604539183472605), Index(13715658236147753368), Index(425052684219277183), Index(11152863278566017226), Index(180851591719846077), Index(4560753670011326846), Index(17869320206264954349)] }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(6385442076773951622), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(11198220080172312907), Index(1306885194083551801), Index(4033242044310747300), Index(11249095626298570961), Index(6413874140259895465), Index(4209042224318991874), Index(8837583199034317524), Index(18406611422148490519), Index(5021497723439764651), Index(2153843598260718194), Index(12018571137175713090), Index(13713636149381025243), Index(7012931550205749594), Index(10702932498076736440), Index(15673494946797279323)] }, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7268110465687644286)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11729763390993834082)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13525091785529401335), Index(12505577542480241602), Index(5647440956230761042), Index(11317889582494419884), Index(15538099831667586810), Index(3722303644450089790), Index(1386891283452011531), Index(4673286685852577742), Index(3662011224102186273), Index(10136811296832429956), Index(438098520697383320), Index(3938033714677379209), Index(3817256221431106778), Index(2025901065898860154), Index(12967779358919400673)] }, DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4949244351608452893), Index(5925469922264751031), 
Index(5401835758703296493), Index(8886512188744593650), Index(10077331781480371875), Index(17070806331384257901), Index(11223506753947022976), Index(1658350805540867535), Index(9984492146478741150), Index(8630320521809850782), Index(13197259098561605733), Index(16141094699284645702), Index(11877943188909620353), Index(4189773356089293983), Index(3413698189667257636)] }, Commit(Index(260711155784918480)), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10323068869158050491), Index(5704443425012086845), Index(145065111726288998), Index(9740312780711263665), Index(1038191427817743433), Index(16122813776854355821), Index(169459722649509974), Index(6099155221682233150), Index(14168573526090716066), Index(5261879806763222323), Index(11102245134271027883), Index(3833643218640432627), Index(15778109148007088051), Index(17223384166901333725), Index(28912308858521608)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
PollPrepareAcks, Commit(Index(11054168294068272439)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(15529016599165925729), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2691380374875032157)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12000777575970696155)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(366739488747469149)), DeliverNexusReply, PollPrepareAcks, Commit(Index(11874615778965619232)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15808255237734082646)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2505905707002436339)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4530562264011086362)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17802467090465612418)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13030199864939180149)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(13180432187895759296)), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(1357562288954909262)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, Commit(Index(11840684477793290148)), Commit(Index(8447525720168395816)), Commit(Index(1674142929004435141)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12201873561777788607)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(2295820054485214729), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11250603605411585950)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5547908169856424769)), Commit(Index(16178823234490102698)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(44250159050392669)), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(11641214542680509686), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17332019644802352703)), PollPrepareAcks, Commit(Index(4004346644178281946)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(206462630663862941)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(7884500106542769693)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18293480189097374663)), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(3802012910157177899), Index(2513303305394269166), Index(261418654572901819), Index(10156975303735331384), Index(10269756914553934731), Index(8386411932361250607), Index(15653549362330095454), Index(10753120163530388676), Index(11504658529264272169), Index(12382638252497182442), Index(17700336259191628110), Index(3307492183415008834), Index(3626038045441442979), Index(1110815564430173121), Index(5087103118747558596)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16259630491757683698)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12093736422860477497)), Commit(Index(9848747969828000898)), 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15435555625967981619)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(15687126366921953139), Index(2743094322322732724), Index(1195895083782656716), Index(4567321805966148153), Index(750610897739320317), Index(4710857968209057244), Index(17025086776898642490), Index(2853936001009082904), Index(11502512371525954860), Index(8589670766974752766), Index(17542017953213471112), Index(1761546370806231453), Index(5847998485092294913), Index(7140739416997715203), Index(8387990262940825705)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7491512183393877883)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2098686216930009405)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, Commit(Index(11434392513873331180)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(668786960158298806)), Commit(Index(5994395619206867741)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6412406000722248334)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18214708339461389678)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6877543717901815605)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(7816810942502493363)), DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13112752371848075292)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11173685867907517821)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(8386582251569096360)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17111698880491225636)), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1363688229832334466)), DeliverNexusReply, Commit(Index(9409480554629329296)), Commit(Index(10284346927257105946)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15147357283839023203)), PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(178974791895593201)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(5328520967703989441), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(10624747800218087297)), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16967095324432630202), Index(1330880413848614888), Index(3877066383091830696), Index(10562691520021323896), Index(4759561173580837283), Index(11382768314922580166), Index(9705463961634835239), Index(11369155699072512895), Index(3409209634280416177), Index(9353094653927832845), Index(10375539785094855918), Index(14919439977281194256), Index(15073849072559674934), Index(1177546080595538710), Index(8278206958315628472)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12697083457141119517)), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13234980501958690208), Index(11633281838643086042), 
Index(12952538457940159986), Index(12651117605376260014), Index(3503315348276118237), Index(7350958701631728477), Index(11794081796595821146), Index(4202125726071801256), Index(9953889709956293977), Index(4148803441230060112), Index(17477938516304732932), Index(13687111192012493437), Index(5071667765395600737), Index(2347923452883510536), Index(10583143395155913519)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(10102925078466392997)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector 
{ rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(9326617967677934389)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10207752235373004077)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4888371876809664680)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1858061495073222697)), DeliverNexusReply, Commit(Index(3548254801182509421)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(12000822605263159435), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverNexusReply, Commit(Index(10799065477066287020)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15367667012338394449)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(2883403300211458409)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(6824761343202323656)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4791357038263576766), Index(4037987467715788360), Index(14045349304627066590), Index(13511197665508998779), Index(12688718155309993182), Index(11733660283779066967), Index(6803556148040195069), Index(12823772114033503228), Index(7229352385411378749), Index(2057807579451152755), Index(17028362611222255338), Index(6855456859840434267), Index(1167370450915598026), Index(7231881074649155760), Index(4136419998313200296)] }, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15689872440168052616)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(7586386511927859316), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(10251133808615074366)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14515382882666821884)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11227647257762615079)), PollPrepareAcks, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore 
{}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16801541565622949797), Index(6770384029177275365), Index(15377178657561314616), Index(6151452764900085017), Index(4505220512040629277), Index(9565880865126214114), Index(15832211030013468803), Index(3711462473400842865), Index(675555599049749593), Index(687364064024093773), Index(14521652702257257147), Index(8427847255716492099), Index(6888920233570493477), Index(5943624179492550686), Index(4533270186717979912)] }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(9033813856366845835), Index(7229023803838780006), Index(13541506048477417149), Index(10942419377547404772), Index(6205294589257990982), Index(10749158387488892541), Index(10461182709973701259), Index(1711698017950110579), Index(9218362435419653196), Index(3199280627528921807), Index(14026206776590186823), Index(2579538121780073023), Index(2210346265960198647), Index(2916746939179612931), Index(2838922348266527609)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(12483692142211394924), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10763405275384647737), Index(12521040244542878236), Index(4542296609189646874), Index(6530918764294440962), Index(11723477322275091265), Index(11641557173712579393), Index(10985674102191062400), Index(17953808893572204780), Index(9549608267544706660), Index(7623805716366238727), Index(10464805844650786901), Index(9473303445646803419), Index(9905379203103144705), Index(15469718517844433745), Index(13418745031933859096)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(352084029879643347)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1050900476108260806)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7172347432669163490)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13967389148665748722), Index(7484461727548523070), Index(5558191850247876277), Index(6146983522949335169), Index(16166024204336539291), 
Index(13084160958057008654), Index(7369345306148036992), Index(9049231124588683695), Index(11818084227166687581), Index(4426700074377147971), Index(1717918551839773302), Index(8699380493495071331), Index(17607418904781945734), Index(13153602759775016478), Index(7183913476927998136)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(6520796080140725069), Index(203058639249276664), Index(14606327215596911567), Index(14325877438180219388), Index(6666348249877280986), Index(9047012337929738589), Index(3820876867912705114), Index(10551123973952075683), Index(1366800042835800054), Index(1251066672221776338), Index(10512885127018829105), Index(7184286925703944065), Index(8114200671132527959), Index(17369663185324504731), Index(4950671109558267876)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10610578020162887061)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14410612445958722237)), Commit(Index(14099495650978502834)), PollPrepareAcks, Commit(Index(10428471581179601651)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18403413528197751494)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3227449142584449625)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5080518174870700455)), Commit(Index(11793420467470117408)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12306145413052876124)), DeliverNexusReply, Commit(Index(7991029737349219903)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16403103057442445740), Index(10334508115755485852), Index(10136913898002071553), Index(5299529588845823096), Index(11494971324345932652), Index(17901919482036654186), Index(14901963126588620054), Index(7719864670324985721), Index(11145121017981580239), Index(12045430878398849609), Index(3096812274669870856), Index(4549913820514270666), Index(9988661166960765366), Index(14309997355427561215), Index(5219305593306065883)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(671106596864060533)), PollPrepareAcks, DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1034807727888612616)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5932022497118157978)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15940506757376697484)), 
PollPrepareAcks, Commit(Index(10770414572400300066)), PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13900489667719156805), Index(9655962733350082884), Index(5574629693971631441), Index(2563300640396804568), Index(6861928417328630026), Index(17796573526269609261), Index(12550176474026475884), Index(11517120552096651634), Index(14149039627856387061), Index(11941454956298454158), Index(13359177885518611696), Index(7281938149345481502), Index(10212765517581711831), Index(10611801337563578883), Index(9074978610081728167)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5649208125082381568)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11934606106878362045)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15658759481956618293)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(16750244594323704633)), Commit(Index(17812233424495485358)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15905317037768582503)), PollPrepareAcks, PollPrepareAcks, Commit(Index(6436800570174780983)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(15883278824566202707)), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6582198699581797580)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10391870061592587818)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(8932798969851000303)), Commit(Index(7643876219998350046)), Commit(Index(9556755263993436035)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(15966133885916934415), Index(15001785101298869793), Index(13317753888139866137), Index(2796280579682952809), Index(12994139915910455622), Index(3436914582772009476), Index(10637631799480685285), Index(5036157594083163944), Index(6677891479102242588), Index(4237523673607808459), Index(10657545339276846358), Index(12734667715456266092), Index(7696479801195518234), Index(16551933367672570790), Index(18151002990676567579)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(11586510541621681891), Index(13307127299326192251), Index(11100026701982256283), Index(4640010746163629921), Index(12484158761924028490), 
Index(2367798393142084333), Index(3703813593308946659), Index(4301766348828427050), Index(2477153619295383123), Index(7947611406996572509), Index(6017299360753012365), Index(9465026174631338124), Index(16142376890280922706), Index(16769906584038482104), Index(18084102852392997837)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(8527719320222153785), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9878541969220224615)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8465176670193709369)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 } }, PollPrepareAcks, DeliverNexusReply, Commit(Index(9738734485630526937)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5110471718509558103)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(14316816892021471160)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4166580538368001677)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15059374740897340304)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4970484025424687723)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(183214632540732890)), Commit(Index(3259578069852817787)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(17629014386224084773)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(15984380247700025700), Index(6575818098927872414), Index(2023436891622556021), Index(14474537354025829726), Index(17937056054606753971), Index(5728609681154238488), Index(964942420270170883), Index(9851173215665668987), Index(18239242281001624768), Index(10163889267685598229), Index(9321525039535880320), Index(16753382787876761862), Index(11327503582530969967), Index(4241624624955073702), Index(13554072390498419068)] }, 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17155537580529394995), Index(16010374169076922356), Index(3995129125829218116), Index(1377011129185604673), Index(11775275204926421250), Index(5309838574823322776), Index(15878145796276675511), Index(2955560842023721766), Index(8658011563013454587), Index(3547977037761451654), Index(10731925166992174467), Index(2532253498129410809), Index(7946250776477423230), Index(11876521849445675650), Index(13549806295446549901)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, Commit(Index(8834486813163619557)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5449036069413986408)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, Commit(Index(11381187693765219085)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 
64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4713871483373678789), Index(8434290582953198013), Index(13969543013266656088), Index(18263048655889653593), Index(3878602847059396574), Index(2762439472704818524), Index(9587337749343845462), Index(12781253199018011595), Index(5152580322520669295), Index(4383090033981161429), Index(15013524051060992207), Index(556370003875251432), Index(1038490512057431491), Index(6627531595790906747), Index(14200793191420150709)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(16252424217510280325)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8124946079027627205)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5508664921858256120)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(9454043495784474288), Index(4665143356797834065), Index(16132864621058461801), Index(17927949770308878993), Index(12825251552530306559), Index(17946430877279136017), Index(6414732928284343000), Index(15613332908690578493), Index(14550049526720128293), Index(17864125204853385611), Index(12982070495731237128), Index(8684930120292543617), Index(14882703532135278930), Index(14156120703278652536), Index(247738402784122599)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5643749310060723009)), Commit(Index(10934926365494716850)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1280050035659482630)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng 
{ core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 })] } } cc 3cab45c4cda06a8e3d7c0b926e4a97017ced0e68217f7d3f56a24c7f8291965e # shrinks to input = _TestTrustQuorumProtocolArgs { input: TestInput { initial_config: GeneratedConfiguration { members: {6, 8, 12}, threshold: Index(3598666382565965385) }, initial_down_nodes: {5, 11, 12}, actions: [LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(12502986758367287569)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5719089241379957081)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11102829069671780211)), Commit(Index(4779407089781807393)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { 
rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(1853575675708957104)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10837298385189843327)), Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(6406399170141939488), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15299763983988032114)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7211593369650515476)), Commit(Index(5868267262253467206)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7952114635063291740)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(11925837939626742864)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 1, removed_nodes: [], threshold: Index(14658888104943782363), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(11491347787571376105)), Commit(Index(11553189279502543507)), Commit(Index(17153379201232067075)), Commit(Index(8005490312897350006)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7629232624102505271)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore 
{}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(11352922451612171903)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(16563155297663633427)), Commit(Index(16037738073148467387)), Commit(Index(1524061087279126171)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2822642692595645052)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11691876503880879441)), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, Commit(Index(6007324438914291674)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6914167119947255853)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(1859443205742806884)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2592245825018530076)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(8022670923778683254)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(2771290716332558163), Index(8846479625165878702), Index(11496178893348231641), Index(16501799140248547815), Index(8391036084614810658), Index(6125546927848409873), Index(8238136447059662359), Index(1071843579785942836), Index(6836639231679113187), Index(6404722546051944147), Index(18319440982452734647), Index(2469475179481158680), Index(1418064280713599736), Index(2538619381689565781), Index(16115838553149940581)] }, Commit(Index(5225051580840365661)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15543422564120320397)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(7898847588246683033)), Reconfigure { num_added_nodes: 2, removed_nodes: [], threshold: Index(2388943721296842059), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(8740097393286525856)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(11060043479396664720), Index(5649110376893207802), Index(15149308934087509146), Index(12391779627391275903), Index(11376550502788090068), Index(5522487292393000115), Index(2972897092944863301), Index(7639225069898177449), Index(2897377163886955884), Index(11887572818603347200), Index(9545244526476579979), Index(15254695899690497283), Index(17948891421605688358), Index(2351862633141932579), Index(10796580948762336558)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2045779160632288490)), Commit(Index(6002633907656892755)), DeliverNexusReply, Commit(Index(3441763785094777618)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15455359691334483816)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
DeliverNexusReply, Commit(Index(183266063974540392)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(10149856061397770634)), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4189744548362066045)), DeliverNexusReply, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(17047330838266219810)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(295238925383630999)), PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16756274101393221065), Index(4322307659447266068), Index(2735844587437486386), Index(2743301696598031857), Index(15365010783302206404), Index(4929872506890503208), Index(14602187539144715562), Index(319744255915348796), Index(6241021188175320055), Index(8529532048083905331), Index(5559831334744339847), Index(6764984633057751135), Index(3642320458103006783), Index(13972398371040836528), Index(9757966418141831829)] }, Commit(Index(2935112031860610236)), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9785520922617045506)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(5680709701676070285), Index(1983184626941337095), Index(2606341717914544473), Index(12427689401321284064), Index(2832560368868196841), Index(2002691954389785007), Index(12909229746348251358), Index(9563010777997780727), Index(1485564786925285157), Index(13123403201059414465), Index(2410907557681013627), Index(15106647999610205187), Index(6867463890794706061), Index(11985021420026791613), Index(15495088542271604423)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(14974438942947459070)), Commit(Index(11639430756307433828)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9780840624125716569)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5490740916071143409)), Commit(Index(17181024989923885048)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) 
}, bias_increment: 0 }), DeliverNexusReply, Commit(Index(13889791135024720531)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11597632454563414952)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14185818554319336536)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverNexusReply, Commit(Index(197251373091187010)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng 
{ rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(14281517334221959447)), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, Commit(Index(15494960479989661956)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(12021794701302472661), Index(4203254074947113298), Index(10992646772226981631), Index(18213845896061473110), Index(14426159287412122739), Index(13850840318688055592), Index(9544015578782664040), Index(122220646801680838), Index(17200975996367508893), Index(10362222212875562698), Index(2110009842226190619), Index(15796059097293544727), Index(2378041559839458979), Index(15061037406064836023), Index(451784032728343767)] }, Commit(Index(1881492606150841975)), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15580242515310837407)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17117383027890769239)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, Commit(Index(16646201380452354596)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(14888052348508167797), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(645629336235986892)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(13988433099092918901), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(3342050805316999613)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(14576682719043769751)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10112856751303909052), Index(7170311877878915743), Index(15393074202139606101), Index(15267760568029098912), Index(7791734968645831568), Index(10863084837085802132), Index(15286764774928179756), Index(434667349345144297), Index(1767090260185112936), Index(4453129900275531313), Index(1672714887061573911), Index(8753496889739647357), Index(1226038234170237202), Index(12278424794298169643), Index(5073133512249220785)] }, DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15343966483818415715)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(5646954058396827181), Index(12274561045953204617), Index(13658753731387358257), Index(12723019148644332866), Index(1753842604206005600), Index(3540501112911615406), Index(5816246588306145427), Index(1457998349386632840), Index(1065461832457665947), Index(17427048528969658944), Index(13853170109037148579), Index(14649753538458847593), Index(10663040602901909947), Index(1859416668616172582), Index(14826665472896851900)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(73988118209371723)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7112620034223326497)), DeliverNexusReply, DeliverNexusReply, Commit(Index(7288606541149562170)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4455628127470610386)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(15091328838768697014), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { 
rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(570202080649598239)), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8083801497123019968)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(9239415348586199900), Index(6843265719369475276), Index(10319991758054312298), Index(875844408425914941), Index(406216502330585289), Index(513170150008941398), Index(1809673900897388123), Index(9398936541136571752), Index(17858506367758900766), Index(3573063833406369343), Index(4763314861785484843), Index(11269741247961594976), Index(3236367503461505227), Index(4696239464918531176), Index(5613257052904351224)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(15627425719349901853)), DeliverNexusReply, Commit(Index(14161135025702205260)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6328269850443164729)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13169519598498621962)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector 
{ rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(9808524773785493283), Index(11628187693086684672), Index(8419931908408520345), Index(9250134028607178686), Index(12827876544233759355), Index(3220653151931622869), Index(12639475606576218043), Index(1339371748904321102), Index(4051651148446606348), Index(4952013592362290290), Index(12246666494209094160), Index(11633741850662263040), Index(10509236945552329647), Index(15769218274045158478), Index(7023567984824330477)] }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(5106483833218933102), Index(13098148203640734701), Index(15912655729114575052), Index(7378551451107506949), Index(1889168224116072664), Index(7505029664107690369), Index(13191166462962004765), Index(9024618892804352385), Index(4436979025304009504), Index(12963691936777294956), Index(16180598956512959494), Index(4453052983834993985), Index(14047139185512586357), Index(11503521432597577695), Index(7033924593281828847)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2702521160964249506)), Commit(Index(4528476720975897295)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, Commit(Index(14542145499099679182)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12288205622928119838)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverNexusReply] } } +cc 0c425da770e0f8300de4a8b74d276d82c43506b768402db2b5ce54ff00e8b53f # shrinks to input = _TestTrustQuorumProtocolArgs { input: TestInput { upgrade_from_lrtq: true, initial_config: 
GeneratedConfiguration { members: {0, 2, 3, 5, 6, 7, 8, 9, 10, 11}, threshold: Index(2075096511080142379) }, initial_down_nodes: {8}, actions: [DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4020245607110230923)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5202742974973809261)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(1871704999202673885), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(15326610393832271255), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11360549582327604261)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(10127668639605139294)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11713588710804093667)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14330351158975901340), Index(10945870089797654208), Index(11673303212796864037), Index(11539585757016254090), Index(9908978172356507943), Index(15745021129434558010), Index(12877244544080522439), Index(16100498132608927025), Index(14359582047100916328), Index(15122686810353900495), Index(18356855146093614213), Index(1606827921501547080), Index(9812005138409339659), Index(4658607637225889659), Index(9260193883917774570)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13352335269286179960)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6852868162851752733)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(2269680166186569338), Index(7929456910973125785), Index(11554040547133147431), Index(18385573591474144496), Index(16726884356374773789), Index(13706813383835740386), Index(14008368352385944709), Index(2560571926982850366), Index(3052970224008144874), Index(8159299885043937860), Index(17990900461555939415), Index(3464994328331046212), Index(7846471838847320594), Index(10795177281858896102), Index(2614039933028931675)] }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(6684717609398501386), Index(7259524238988711000), Index(5488586987102703084), Index(7747676500423238318), Index(12921463504987239299), Index(16874558153801118253), Index(7656813359679967213), Index(11437751114561594859), Index(17982257311983186731), Index(15855976679316012227), Index(11904569757357163192), Index(2983351955878529451), Index(7569364509573719000), Index(9878989439335962733), Index(2531909016226168857)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(17274729760256844933)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { 
num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(7654730680640031698), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14703913234987705390)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15419746116721089877)), Commit(Index(5287756350475504970)), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverNexusReply, Commit(Index(7258794331006030169)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(788120257668552393)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16664868796956807356)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(1774349379251079176), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16893545234724746613)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11040119642951827164)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, Commit(Index(18092660183696109680)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7763300916150202110)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(10607807608410313558)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16971028485691951534)), Commit(Index(6492168759194941064)), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(13399217516604632848)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(17099790387361381958), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3572074233529369732)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(6662630358564144207)), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4880728999357584242)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7579077204725361)), PollPrepareAcks, Commit(Index(4393765432954784039)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18109841008836482438)), Commit(Index(13835374143159234084)), Commit(Index(3245352667620020954)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7820043085601641864)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, Commit(Index(9710494244982034453)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1576074415931439980)), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [], threshold: Index(2607305777135002624), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(1914394466092075328)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(7042984803834477454)), PollPrepareAcks, DeliverNexusReply, Commit(Index(8560445299901326311)), Commit(Index(10616764939860543206)), Commit(Index(15973154036063239239)), DeliverNexusReply, Commit(Index(11687296140216519130)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6540947024582932391)), Commit(Index(8775768918058113150)), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14541255493008572102)), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, Commit(Index(693932744225779592)), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(11612262593545603239)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(10785562310847273447), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(15009387807890191419)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng 
{ rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17657340517248348821)), PollPrepareAcks, DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(8323463726974579539)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17606601719067621625), Index(13315041283824806666), Index(12790626218580703939), Index(3998205859273977367), Index(14399321498368536752), Index(9805478429054669276), Index(7202110426152280676), Index(16426952144465519453), Index(7383275362747904282), Index(9065170414648043158), Index(5821789428442067682), Index(2559944776282544473), Index(1765580900629816627), Index(17342401403612607956), Index(17328156859536938678)] }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(13440811399219871641)), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(7516552806424558061), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(15078588233715811477)), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(13986163301149362060), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(7876360854350466952)), Commit(Index(2353200540214153709)), PollPrepareAcks, DeliverNexusReply, Commit(Index(9182401475070532923)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(5087624271102573827)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4693926470684857818)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2165685895920361473)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 
} }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14591303454177381450)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(7160901099220421066), Index(5170123843246811101), Index(17620607858815585257), Index(6042856623050399758), Index(12092654714785041197), Index(11772743173297790701), Index(13965733242654532845), Index(2125781942256043726), Index(9627808883615539129), Index(17463212110294198777), Index(7571297210937289936), Index(9940974956101268386), Index(13956455477225443144), Index(12314140043289205478), Index(8504524847377898365)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(7317598779338918802), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(15335100021273210153)), DeliverNexusReply, DeliverNexusReply, Commit(Index(17706643431380657470)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(15395240294575426147), Index(5401877605986442836), Index(8482694159043635178), Index(6714126593710859962), Index(8525694945072718318), Index(7329610073317027055), Index(18349584935274549147), Index(4673147460901942309), Index(11591022170294189174), Index(16197925525087354896), Index(13751191049466110100), Index(12485679995310282361), Index(12440471970956889462), Index(8935966482343516557), Index(1448212624694504836)] }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13498137168226038399)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(6720054827353034886), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11454738057052777586)), Commit(Index(13136659443736043275)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(9376765783969533486)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(4808299794540374877), Index(12778141777799044157), Index(882054218238426304), Index(17643612711627026583), Index(1349172206358924680), Index(7296807161574372783), Index(2811773706720193434), Index(18380623917374352350), Index(14981154898986496517), Index(9286187690467444150), Index(2203758213149364743), Index(7253187837136259047), Index(14798780745627214998), Index(9674453346602946518), Index(3630005783169940846)] }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9671249857869319552)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 
64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(13633352709678011664)), PollPrepareAcks, Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(17593270954590912275), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1813184379179125314)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(11174913761474578420), Index(3996691452980596609), Index(15325409015309468328), Index(7882432990563230348), Index(8878232149275404887), Index(6187637105636249699), Index(15172068612398906501), Index(17867655504182477065), Index(9331244006556136725), Index(8656782834051558255), Index(18046959753467538906), Index(10468676508582759745), Index(17346910448613840977), Index(18027024635445329950), Index(10754884539225562433)] }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14060142301373180212)), Commit(Index(10315464890154256854)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(10841785659283871307)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13440892332981517897), Index(5475206522237036683), Index(8342531911644166471), Index(10693428208881285463), Index(16256770331594411396), Index(9838400167665778401), Index(13148467628584819398), Index(10820964182237495268), Index(6937278955309052192), Index(15959952591609700792), Index(4028983636792497220), Index(2167162516005329873), Index(8192345643091085343), Index(7976638532106012331), Index(15628531582709324174)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), 
PollPrepareAcks, Commit(Index(8021049497053202140)), Commit(Index(12980109764808642862)), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9969917896930741527)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(1709367603251772862)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10909003754651025079), Index(3827228184604956604), Index(17236884144015129457), Index(3396390203582650437), Index(3357339819851759075), Index(4416748412078575098), Index(6960784198582881963), Index(6326579943226158899), Index(4611419804378079683), Index(7483172865317927215), Index(3937696713118137220), Index(8706777221197275757), Index(4299189495068686767), Index(10058228556032476649), Index(659461018190138583)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(16617957066561928098), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(7126703488014705565)), DeliverNexusReply, Commit(Index(8046758538772912000)), DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(10580369053960414377), Index(107461561316455695), Index(8958793331193737331), Index(11306930385313634382), Index(2519576490003663148), Index(12038187089186499369), Index(17655435624406921818), Index(18070168459996720826), Index(10998055857912669375), Index(14086394521819140142), Index(1637034689255939752), Index(16174220294968236928), Index(18285324716372801917), Index(16516657384089604603), Index(1881939802854798605)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(2386314852451433463)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1453828102284776808)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(860921346976613191)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 2, removed_nodes: [], threshold: Index(11056933848883062627), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(15053667624858876228), Index(7492254090022277249), Index(11905343258213333496), Index(8622794296608811431), Index(6591417961971819448), Index(17441114176517195118), Index(14390866542596077138), Index(347763152437321615), Index(11671955860974045853), Index(9455789537810620885), Index(8190877030816111243), Index(1694523588414912058), Index(6897352997615858580), Index(5353001030237286853), Index(7895691407507481373)] }, Commit(Index(11816042392490167926)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(13091469520969987179), Index(6308444182925175827), Index(6819745408559421892), Index(1838886779874888079), Index(15012031807669069903), Index(11081304442788654657), Index(18237504390317866910), Index(948365321481302792), Index(17826594616729522388), Index(9915824624584269923), Index(3528673426314319303), Index(13154844123236744861), Index(10530607804391754316), Index(11945695526343176800), Index(8494427344419716860)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(3237718693266459803), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(9999672702266021082)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6734187809496248048)), Commit(Index(2748081796500650589)), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(16250811921306391081)), Commit(Index(7726930765933049872)), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7598415822152798836)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(17566081823901124466), Index(7763406593142650296), Index(9440426426661861936), Index(9125729394704318289), Index(12348813795752900706), Index(4761313528187712958), Index(8530565577149900199), Index(10481413325663595858), Index(8487650275892984494), Index(16523172080498921404), Index(14833535390751434090), Index(11469153948870876876), Index(12502196240642534500), Index(16096392529243150939), Index(11441219493410719408)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: 
ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17808196008426122856)), Commit(Index(16297435489222864973)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(11467627468823624107), Index(8535596677062615686), Index(2631942318928574997), Index(15412138565953792939), Index(11755250226140276314), Index(14576016440375873852), Index(16812139847499381404), Index(7991640605609873399), Index(17921902558287395978), Index(15495699556351969970), Index(6956827120162932701), Index(1510890079753214660), Index(18385347232059989312), Index(16802314528085012052), Index(9292333923475093248)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6355951224944071287)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4407738633294984396)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng 
{ rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(7052621039203008832)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(780590560348177805)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11814507354829467033)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(13949139416374683849)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(6801553854170555786), Index(8494823562806824999), Index(539349898269215640), Index(11841626307419272183), Index(3926627888960967214), Index(2628686577478591776), Index(10477209502465491003), Index(5503103182562780252), Index(3951670129389165713), 
Index(5234936414737500541), Index(11057017203080173130), Index(12899759063492358951), Index(11887985733063935340), Index(1267914853164766197), Index(6467578628567731529)] }, PollPrepareAcks, Commit(Index(8012773927833842131)), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5305582424552928283)), Commit(Index(9829659207617287906)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 3, removed_nodes: [], threshold: Index(14048294989600641651), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(18279519570173783910), Index(4866179355112036706), Index(9566387390284025244), Index(10095518147006403550), Index(11575187938899381849), Index(1423400877681760080), Index(422149649790924300), Index(1071893476100108915), Index(5191987601988174139), Index(9332562879164502106), Index(2479426342463769622), Index(17452251976058898392), Index(9671997025893791765), Index(15974177206164990214), Index(12465486800624213079)] }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(15351243098520669810)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11572010011254698568)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11133468361145281250)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3083756653152569639)), Commit(Index(8032988340277712307)), Commit(Index(12538992977141060297)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(17942443508395133786)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(8148057384066570824), Index(5280524196114846491), Index(10791534214071006339), Index(9153886839860121115), Index(1980789572513063567), Index(7663574272695773770), Index(16257204430693521927), Index(2647559012217596392), Index(10449362425827596396), Index(4656932484896374208), Index(15494391584812696718), Index(17195777495649247259), Index(3784799853820380551), Index(4681844973077274488), Index(14754861529563851899)] }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(6274325833542618680)), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(789196727680948465), Index(13960741201721158963), Index(14745065865308880602), Index(696661230191550035), Index(9653895641365507641), Index(4225190834959986985), Index(11592325433333855811), Index(1582797696935448168), Index(2666416036411085372), Index(3551061636379425847), Index(14319869058517426208), Index(16602629658362525835), 
Index(5982455806449817181), Index(18083053726569639693), Index(2520742566501869064)] }, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(9938534153483594502)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10219843251135779363)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(1203210213050278268)), Commit(Index(9173895060054557994)), Commit(Index(5654227066185513206)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(8567411121119823333)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10807150537926399005)), DeliverNexusReply, PollPrepareAcks, Commit(Index(13592376780254003439)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(12957477457811797591)), Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { 
rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(6410261083504601370), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(8280836442616047174)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13162902017049847202)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13103524469879919129)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) 
}, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17121459834606186639)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(138696650346110836)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, Commit(Index(12449220339129337703)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, 
DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(2614351463872254859)), Commit(Index(462553020848420194)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12845896403561362795)), Commit(Index(1118785620541981574)), ClearSecrets(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11299360458404855955)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 4, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(7238704236980388811), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, Commit(Index(924028928934058821)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(490428983514126615), Index(9530862547613155780), Index(3028767464897435065), Index(1751152067414330693), Index(13425918947627317320), Index(8049801604123674213), Index(7064833913730842585), Index(17529884398487143592), Index(18201817241430874549), Index(9201557179521815692), Index(5487772365790298911), Index(17327877377239717011), Index(5188396984208718706), Index(2746618734658178095), Index(4558604775854921307)] }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(18374069969104102474), Index(651477543848544939), Index(9978741033548767278), Index(12367094925170912219), Index(15047346518967277960), Index(8844217492480529608), Index(1327546472178010078), Index(9942951128521552704), Index(18395110773781274632), Index(18087867007453815361), Index(7264612749775502162), Index(2748934562493364932), Index(2077972063616145568), Index(5159031636189928182), Index(3259754503949828327)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(2641475268435219389)), DeliverNexusReply, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(18197459444879493898), Index(5965379858718521812), Index(12054014684801183307), Index(13810593481528738817), Index(2045810787767925040), Index(1040766974201982781), Index(15457881575423816433), Index(16110104764337569986), Index(2469860686699146088), Index(4069416945206138642), Index(8894464956207557494), Index(4156335820093758503), Index(13568492617348602442), Index(11998817619404638334), Index(8843459627495747969)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16849769347774718467), Index(17409826651220238741), Index(9400487841919401223), Index(13202929559326935110), Index(17957060832577513837), Index(9505466516327066213), Index(18413808136072257628), Index(1035637266572764421), Index(16050790678635427414), Index(333278615944196839), Index(2389141132627651456), Index(13160573301407121554), Index(3489162588721393680), Index(10303641061949835596), Index(14961528932859858154)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16780407891057479520)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11588436558758895620)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(1953359894730065180), Index(7992274739634752296), Index(14225059321904303055), Index(17702974008275692046), Index(7057829844828536639), Index(1494781716540798455), Index(5382076632876158733), Index(13552463289652006680), Index(8008221625125365990), Index(1799439749763512562), Index(8191075158843500229), Index(3542797735776152611), Index(13151100558629668187), Index(6921583636220206746), Index(10021391175040023498)] }, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3876443659064060431)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13420745941387982346)), PollPrepareAcks, Commit(Index(14031224175967412659)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { 
rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12574694402049901446)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3644781799398026228)), Commit(Index(716496160626962649)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14733782563796177900)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2627429067335326880)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5055860091075765185)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(1551299508378790178), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4205981991591304163)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { 
core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(17520108299292173853), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(10410966573608420749)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(7553293477899687933)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(13422504520395129892)), Commit(Index(3867124488688104864)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 2, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(13367306370444000247), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14170506643044331442)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 0, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(7352573905450423790), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, PollPrepareAcks, PollPrepareAcks, PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(5940356092857282195)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 2, removed_nodes: [], threshold: Index(17753040054554425955), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17018409707519469719)), Commit(Index(15465490957956103481)), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(4163786293476436594)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(14533323098280559027)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12527606536722888251)), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(17197342315964287041)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Reconfigure { num_added_nodes: 1, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(10752362304518254170), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(232139490728064163)), Commit(Index(16428509525994673245)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(13541961055259433153)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, 
bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(14510950032574050174), Index(10287956809676577610), Index(2143609321377351843), Index(13133709492233817810), Index(3650776165952487913), Index(11742063829654510673), Index(4905813648079448671), Index(8048554921022564069), Index(13185573505216173275), Index(11539229396220930471), Index(927033662978902186), Index(2027860124945429740), Index(16365035168581640537), Index(4906193159697723241), Index(8661367306956690808)] }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(12244272240165019569)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: 
TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(3296703810383705095)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16999838010349898220)), Commit(Index(17262474665486107074)), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(14619584100637054121)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(11350626431798773381)), PollPrepareAcks, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, 
result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(16721815551450829166)), Commit(Index(2069291223452190945)), PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 3, removed_nodes: [Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }], threshold: Index(7353013837547115864), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Reconfigure { num_added_nodes: 0, removed_nodes: [], threshold: Index(8081408671495227071), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: 
BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(1459909077775289306)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(2522727984599443832)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, PollPrepareAcks, DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(4480732953795899325)), PollPrepareAcks, Commit(Index(2164724381715260089)), Commit(Index(2099539570104973387)), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, Commit(Index(6547162605603609)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, PollPrepareAcks, RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(16974655747237418207), Index(126403665420492909), Index(15411422508246208775), Index(10600455876598204169), Index(4011304974184363948), Index(2043655559567786207), Index(5715350500486693330), Index(17157493495669388966), Index(12448240485550284130), Index(2532882057809952474), Index(9633145953357444393), Index(8236592165301466576), Index(2903256721914364274), Index(15602821862039129379), Index(1144164423138621356)] }, LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Reconfigure { num_added_nodes: 4, removed_nodes: [], threshold: Index(3762374576257187234), coordinator: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, DeliverNexusReply, Commit(Index(11812315002069242919)), DeliverNexusReply, Commit(Index(7135295796261466890)), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, CrashNode(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, 
index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(3210931176407393412)), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(18195488548734133889)), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(6165470877854159702)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, Commit(Index(14384685101594940937)), PollPrepareAcks, Commit(Index(7597817714860886894)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(2617264439979063354)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: 
ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), LoadLatestRackSecret(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), RestartNode { id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, connection_order: [Index(9592330572194221366), Index(17548753351963732213), Index(6134602441874805537), Index(11052512374245835069), Index(9485351794431236145), Index(18061727828252432519), Index(11537532804102698907), Index(9713377113635479081), Index(13079338436863271791), Index(12453738398309118759), Index(10699565311328458628), Index(3088123295936342251), Index(16677639640078807661), Index(13975369826495371362), Index(6363718810028798642)] }, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverNexusReply, PollPrepareAcks, LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, Commit(Index(2665705931415054580)), PollPrepareAcks, DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), PollPrepareAcks, DeliverNexusReply, PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(15261877162264837270)), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), Commit(Index(6887721711235476358)), PollPrepareAcks, DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 
} }) }, bias_increment: 0 }), LoadRackSecret { config: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }, id: Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 } }, DeliverNexusReply, PollPrepareAcks, ClearSecrets(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverEnvelope(Selector { rng: TestRng { rng: ChaCha(ChaCha20Rng { rng: BlockRng { core: ChaChaXCore {}, result_len: 64, index: 64 } }) }, bias_increment: 0 }), DeliverNexusReply, Commit(Index(2204264570871564990))] } } diff --git a/trust-quorum/tests/cluster.rs b/trust-quorum/tests/cluster.rs index 2a30c3163e7..1215e5b7eec 100644 --- a/trust-quorum/tests/cluster.rs +++ b/trust-quorum/tests/cluster.rs @@ -44,6 +44,29 @@ impl TestState { TestState { tq_state: TqState::new(log), skipped_actions: 0 } } + fn initial_config_lrtq( + &self, + config: GeneratedConfiguration, + ) -> Vec { + // `tq_state` doesn't create the member universe until the first event is + // applied. We duplicate it here so we can create that initial config + // event. + let member_universe = + trust_quorum_test_utils::member_universe(MEMBER_UNIVERSE_SIZE); + let members: BTreeSet = config + .members + .iter() + .map(|index| member_universe[*index].clone()) + .collect(); + let coordinator = + members.first().cloned().expect("at least one member"); + let config = NexusConfig::new_lrtq(coordinator, members); + vec![Event::InitialSetupLrtq { + member_universe_size: MEMBER_UNIVERSE_SIZE, + config, + }] + } + fn initial_config_events( &self, config: GeneratedConfiguration, @@ -404,6 +427,29 @@ impl TestState { } } + fn action_to_events_upgrade_from_lrtq( + &self, + coordinator: Selector, + ) -> Vec { + let mut new_config = self.tq_state.nexus.latest_config().clone(); + let new_epoch = new_config.epoch.next(); + new_config.epoch = new_epoch; + let coordinator = coordinator.select(new_config.members.iter()).clone(); + new_config.coordinator = coordinator.clone(); + new_config.op = NexusOp::Preparing; + new_config.prepared_members.clear(); + new_config.committed_members.clear(); + + let mut events = vec![Event::LrtqUpgrade(new_config)]; + + if self.tq_state.crashed_nodes.contains(&coordinator) { + // This simulates a timeout on the reply from the coordinator which + // triggers an abort. + events.push(Event::AbortConfiguration(new_epoch)); + } + events + } + fn action_to_events_reconfigure( &self, num_added_nodes: usize, @@ -411,8 +457,12 @@ impl TestState { threshold: Index, coordinator: Selector, ) -> Vec { + if self.tq_state.nexus.needs_upgrade_from_lrtq() { + return self.action_to_events_upgrade_from_lrtq(coordinator); + } let latest_epoch = self.tq_state.nexus.latest_config().epoch; let last_committed_config = self.tq_state.nexus.last_committed_config(); + // We must leave at least one node available to coordinate between the // new and old configurations. 
let (new_members, coordinator) = match last_committed_config { @@ -693,7 +743,17 @@ impl TestState { let mut new_config = latest_config.clone(); new_config.epoch = latest_config.epoch.next(); new_config.op = NexusOp::Preparing; - let event = Event::Reconfigure(new_config.clone()); + new_config.prepared_members.clear(); + new_config.committed_members.clear(); + + // If we are currently upgrading LRTQ, then we must perform + // an upgrade and complete that rather than a reconfigure. + let event = if self.tq_state.nexus.needs_upgrade_from_lrtq() { + Event::LrtqUpgrade(new_config.clone()) + } else { + Event::Reconfigure(new_config.clone()) + }; + self.record_and_apply_event(event, event_log)?; // Deliver all envelopes related to `Event::Reconfigure` @@ -718,6 +778,12 @@ impl TestState { // At this point all the rack secrets should be available. self.compare_all_loaded_rack_secrets(&new_config); } + NexusOp::LrtqCommitted => { + // Nothing to do here. + // + // The test run was so short that we never even tried to upgrade + // out of LRTQ. + } } // We should have no envelopes outgoing on any node as they should be @@ -957,7 +1023,7 @@ impl TestState { prepare_acks, .. } = cs.op() { - (prepare_acks.clone(), cs.reconfigure_msg().epoch()) + (prepare_acks.clone(), cs.msg().epoch()) } else { (BTreeSet::new(), Epoch(0)) } @@ -1110,6 +1176,10 @@ pub enum Action { /// Generate a new configuration by adding a number of *new* (non-expunged) /// nodes to the cluster from `member_universe` and removing the specific /// nodes in the current cluster given by the indices `removed_nodes`. + /// + /// In the case of an ongoing LRTQ upgrade, `num_addded_nodes`, `num_removed_nodes`, + /// and `threshold` fields will be ignored, as the same set of nodes will be used + /// for the new trust quorum group as in the LRTQ group. #[weight(1)] Reconfigure { #[strategy(0..MAX_ADDED_NODES)] @@ -1183,6 +1253,11 @@ pub struct GeneratedConfiguration { #[derive(Debug, Arbitrary)] pub struct TestInput { + // The initial configuration should sometimes be LRTQ to test upgrades from + // LRTQ. In the case of LRTQ, we will ignore the `initial_down_nodes` field + // of `TestInput`, since LRTQ requires full availability at RSS time. 
+ upgrade_from_lrtq: bool, + initial_config: GeneratedConfiguration, // We choose a set of nodes to be crashed, resulting in them being @@ -1210,8 +1285,14 @@ fn test_trust_quorum_protocol(input: TestInput) { let mut state = TestState::new(log.clone()); // Perform the initial setup - let events = state - .initial_config_events(input.initial_config, input.initial_down_nodes); + let events = if input.upgrade_from_lrtq { + state.initial_config_lrtq(input.initial_config) + } else { + state.initial_config_events( + input.initial_config, + input.initial_down_nodes, + ) + }; for event in events { event_log.record(&event); state.tq_state.apply_event(event); From 4de66e9b005a38d9fc7d83ddedd735903416b8b3 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Wed, 24 Sep 2025 11:20:41 -0700 Subject: [PATCH 12/18] bump to v17; API version to planned release date (#8907) --- dev-tools/openapi-manager/src/omicron.rs | 2 +- dev-tools/releng/src/main.rs | 2 +- nexus/external-api/src/lib.rs | 2 +- openapi/nexus.json | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dev-tools/openapi-manager/src/omicron.rs b/dev-tools/openapi-manager/src/omicron.rs index e5c9158d462..2e36c75b50c 100644 --- a/dev-tools/openapi-manager/src/omicron.rs +++ b/dev-tools/openapi-manager/src/omicron.rs @@ -120,7 +120,7 @@ pub fn all_apis() -> Vec { ManagedApiConfig { title: "Oxide Region API", versions: Versions::new_lockstep(semver::Version::new( - 20250730, 0, 0, + 20251008, 0, 0, )), description: "API for interacting with the Oxide control plane", boundary: ApiBoundary::External, diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs index 58238e78c59..7382925b847 100644 --- a/dev-tools/releng/src/main.rs +++ b/dev-tools/releng/src/main.rs @@ -46,7 +46,7 @@ use crate::job::Jobs; /// to as "v8", "version 8", or "release 8" to customers). The use of semantic /// versioning is mostly to hedge for perhaps wanting something more granular in /// the future. -const BASE_VERSION: Version = Version::new(16, 0, 0); +const BASE_VERSION: Version = Version::new(17, 0, 0); const RETRY_ATTEMPTS: usize = 3; diff --git a/nexus/external-api/src/lib.rs b/nexus/external-api/src/lib.rs index 75e3ac47685..60506ab0b5e 100644 --- a/nexus/external-api/src/lib.rs +++ b/nexus/external-api/src/lib.rs @@ -30,7 +30,7 @@ use omicron_common::api::external::{ use openapi_manager_types::ValidationContext; use openapiv3::OpenAPI; -pub const API_VERSION: &str = "20250730.0.0"; +pub const API_VERSION: &str = "20251008.0.0"; const MIB: usize = 1024 * 1024; const GIB: usize = 1024 * MIB; diff --git a/openapi/nexus.json b/openapi/nexus.json index 104f4239dbb..d88de5977b9 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -7,7 +7,7 @@ "url": "https://oxide.computer", "email": "api@oxide.computer" }, - "version": "20250730.0.0" + "version": "20251008.0.0" }, "paths": { "/device/auth": { From 7f724c300892f5536eaed4ce4f3f71697f0d87d2 Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 24 Sep 2025 13:00:00 -0700 Subject: [PATCH 13/18] [reconfigurator-planning] allow adding zones if target release generation is 1 (#9066) For customers that are going to continue relying on MUPdate, the planner should act the same way as it did before self-service update existed. We ascertain this by looking at whether a target release has ever been set. Most of the tests no longer require the `add_zones_with_mupdate_override` config, so add a new reconfigurator-cli script which specifically tests that config. 
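To illustrate the decision described above, here is a minimal, hypothetical Rust sketch (the type and function names below are illustrative assumptions, not the actual reconfigurator-planning API) of how the planner could combine the reasons for waiving zone-add blockers — the same wording that shows up in the planning reports in the expected output below:

    // Hypothetical sketch of the zone-add waiver decision; all names are assumed.
    #[derive(Clone, Copy, PartialEq, Eq)]
    struct Generation(u64);

    struct PlannerConfig {
        add_zones_with_mupdate_override: bool,
    }

    /// Collect reasons to add zones even though add/update blockers exist.
    fn zone_add_waiver_reasons(
        config: &PlannerConfig,
        target_release_generation: Generation,
    ) -> Vec<&'static str> {
        let mut reasons = Vec::new();
        if config.add_zones_with_mupdate_override {
            reasons.push("planner config `add_zones_with_mupdate_override` is true");
        }
        // Generations start at 1, so a value of 1 means a target release has
        // never been set: the rack is still updated via MUPdate, and the
        // planner keeps its pre-self-service-update behavior of placing zones.
        if target_release_generation == Generation(1) {
            reasons.push("target release generation is 1");
        }
        reasons
    }

    fn main() {
        let config = PlannerConfig { add_zones_with_mupdate_override: false };
        let reasons = zone_add_waiver_reasons(&config, Generation(1));
        if !reasons.is_empty() {
            println!(
                "adding zones despite being blocked, because: {}",
                reasons.join(", ")
            );
        }
    }

Under the sketch's assumptions, a rack whose target release generation is still 1 gets zone adds waived even without the `add_zones_with_mupdate_override` config, which matches the updated planning-report lines in the test output changes that follow.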
--- .../tests/input/cmds-add-sled-no-disks.txt | 4 - .../cmds-add-zones-with-mupdate-override.txt | 40 ++ .../cmds-expunge-newly-added-internal-dns.txt | 4 - .../output/cmds-add-sled-no-disks-stdout | 18 +- ...mds-add-zones-with-mupdate-override-stderr | 0 ...mds-add-zones-with-mupdate-override-stdout | 412 ++++++++++++++++++ .../tests/output/cmds-example-stdout | 4 +- ...ds-expunge-newly-added-external-dns-stdout | 4 +- ...ds-expunge-newly-added-internal-dns-stdout | 13 +- .../output/cmds-mupdate-update-flow-stdout | 2 +- nexus/reconfigurator/planning/src/planner.rs | 157 ++----- .../planner_decommissions_sleds_bp2.txt | 5 +- .../output/planner_nonprovisionable_bp2.txt | 5 +- .../app/background/tasks/blueprint_planner.rs | 9 +- nexus/types/src/deployment/planning_report.rs | 21 +- openapi/nexus-internal.json | 7 +- 16 files changed, 519 insertions(+), 186 deletions(-) create mode 100644 dev-tools/reconfigurator-cli/tests/input/cmds-add-zones-with-mupdate-override.txt create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stderr create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt index 2f1d99e8411..971b27eb2bc 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-add-sled-no-disks.txt @@ -12,10 +12,6 @@ sled-add --ndisks 0 # Generate a new inventory collection that includes that sled. inventory-generate -# Set the add_zones_with_mupdate_override planner config to ensure that zone -# adds happen despite zone image sources not being Artifact. -set planner-config --add-zones-with-mupdate-override true - # Try to plan a new blueprint; this should be okay even though the sled # we added has no disks. blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-add-zones-with-mupdate-override.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-add-zones-with-mupdate-override.txt new file mode 100644 index 00000000000..b1bb27cecbc --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-add-zones-with-mupdate-override.txt @@ -0,0 +1,40 @@ +# This script tests the add-zones-with-mupdate-override +# planner config. + +# Load example system +load-example --nsleds 3 --ndisks-per-sled 3 + +# Create a TUF repository from a fake manifest. We're going to use this +# repository to test out the minimum release generation flow. +tuf-assemble ../../update-common/manifests/fake.toml +set target-release repo-1.0.0.zip + +# Update the install dataset on this sled to the target release. +# (This populates the zone manifest, used for no-op conversions from +# install dataset to artifact down the road.) +sled-update-install-dataset serial0 --to-target-release + +# Simulate a mupdate on sled 0 by setting the mupdate override field to a +# new UUID (generated using uuidgen). +sled-set serial0 mupdate-override 2d0f6cbc-addc-47a2-962a-6a01e13376bf + +# Generate a new inventory and plan against that. +inventory-generate +blueprint-plan latest latest + +# Diff the blueprints. This diff should show "will remove mupdate override" +# and the target release minimum generation being set. +blueprint-diff latest + +# Set Nexus redundancy to 4. +set num-nexus 4 + +# Plan with the new Nexus. 
This will not add any Nexus zones. +blueprint-plan latest latest +blueprint-diff latest + +# Set the add-zones-with-mupdate-override config, then do a planning run. +# This *will* add a new Nexus zone. +set planner-config --add-zones-with-mupdate-override true +blueprint-plan latest latest +blueprint-diff latest diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt index ab5dfe0f998..9d2cbb99d2f 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-expunge-newly-added-internal-dns.txt @@ -12,10 +12,6 @@ blueprint-diff dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 8da82a8e-bf97-4fbd-8ddd-9f64 blueprint-edit 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 mark-for-cleanup 99e2f30b-3174-40bf-a78a-90da8abba8ca blueprint-diff 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 58d5e830-0884-47d8-a7cd-b2b3751adeb4 -# Set the add_zones_with_mupdate_override planner config to ensure that zone -# adds happen despite zone image sources not being Artifact. -set planner-config --add-zones-with-mupdate-override true - # Planning a new blueprint will now replace the expunged zone, with new records for its replacement. blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint-diff 58d5e830-0884-47d8-a7cd-b2b3751adeb4 af934083-59b5-4bf6-8966-6fb5292c29e1 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout index ece6303ea98..67bc81ec0a2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout @@ -33,14 +33,6 @@ added sled 00320471-945d-413c-85e7-03e091a70b3c (serial: serial3) generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds -> # Set the add_zones_with_mupdate_override planner config to ensure that zone -> # adds happen despite zone image sources not being Artifact. -> set planner-config --add-zones-with-mupdate-override true -planner config updated: -* add zones with mupdate override: false -> true - - - > # Try to plan a new blueprint; this should be okay even though the sled > # we added has no disks. 
> blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 @@ -48,16 +40,13 @@ INFO skipping noop image source check for all sleds, reason: no target release i generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 blueprint source: planner with report: planning report: -planner config: - add zones with mupdate override: true - * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 9 zones - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 8 zones - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 8 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: target release generation is 1 * no zpools in service for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c * discretionary zone placement waiting for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c * zone updates waiting on zone add blockers @@ -297,16 +286,13 @@ parent: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 blueprint source: planner with report: planning report: -planner config: - add zones with mupdate override: true - * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 9 zones - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 8 zones - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 8 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: target release generation is 1 * no zpools in service for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c * discretionary zone placement waiting for NTP zones on sleds: 00320471-945d-413c-85e7-03e091a70b3c * zone updates waiting on zone add blockers diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stderr b/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stderr new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout new file mode 100644 index 00000000000..1ee59db0ce8 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout @@ -0,0 +1,412 @@ +using provided RNG seed: reconfigurator-cli-test +> # This script tests the add-zones-with-mupdate-override +> # planner config. + +> # Load example system +> load-example --nsleds 3 --ndisks-per-sled 3 +loaded example system with: +- collection: f45ba181-4b56-42cc-a762-874d90184a43 +- blueprint: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 + + +> # Create a TUF repository from a fake manifest. We're going to use this +> # repository to test out the minimum release generation flow. 
+> tuf-assemble ../../update-common/manifests/fake.toml +INFO assembling repository in +INFO artifacts assembled and archived to `repo-1.0.0.zip`, component: OmicronRepoAssembler +created repo-1.0.0.zip for system version 1.0.0 + +> set target-release repo-1.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 1.0.0, hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, length: 732 +INFO added artifact, name: fake-rot, kind: gimlet_rot_image_a, version: 1.0.0, hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, length: 783 +INFO added artifact, name: fake-rot, kind: gimlet_rot_image_b, version: 1.0.0, hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, length: 783 +INFO added artifact, name: fake-rot-bootloader, kind: gimlet_rot_bootloader, version: 1.0.0, hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, length: 797 +INFO added artifact, name: fake-host, kind: gimlet_host_phase_1, version: 1.0.0, hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, length: 524288 +INFO added artifact, name: fake-host, kind: cosmo_host_phase_1, version: 1.0.0, hash: 9525f567106549a3fc32df870a74803d77e51dcc44190b218e227a2c5d444f58, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 1.0.0, hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: gimlet_trampoline_phase_1, version: 1.0.0, hash: bcb27520ee5a56e19f6df9662c66d69ac681fbd873a97547be5f6a5ae3d250c4, length: 524288 +INFO added artifact, name: fake-trampoline, kind: cosmo_trampoline_phase_1, version: 1.0.0, hash: e235b8afb58ee69d966853bd5efe7c7e904da84b9035a332b3e691dc1d5cdbd0, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 1.0.0, hash: a5dfcc4bc69b791f1c509df499e9e72cce844cb2b53a56d8bb357b264bdf13b6, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 1.0.0, hash: 52b1eb4daff6f9140491d547b11248392920230db3db0eef5f5fa5333fe9e659, length: 1686 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 1.0.0, hash: cda702919449d86663be97295043aeca0ead69ae5db3bbdb20053972254a27a3, length: 1690 +INFO added artifact, name: clickhouse_server, kind: zone, version: 1.0.0, hash: 5f9ae6a9821bbe8ff0bf60feddf8b167902fe5f3e2c98bd21edd1ec9d969a001, length: 1690 +INFO added artifact, name: cockroachdb, kind: zone, version: 1.0.0, hash: f3a1a3c0b3469367b005ee78665d982059d5e14e93a479412426bf941c4ed291, length: 1689 +INFO added artifact, name: crucible-zone, kind: zone, version: 1.0.0, hash: 6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047, length: 1690 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 1.0.0, hash: 21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02, length: 1695 +INFO added artifact, name: external-dns, kind: zone, version: 1.0.0, hash: ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d, length: 1689 +INFO added artifact, name: internal-dns, kind: zone, version: 1.0.0, hash: ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a, length: 1689 +INFO added artifact, name: ntp, kind: zone, version: 1.0.0, hash: 67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439, length: 1681 +INFO added artifact, name: nexus, kind: zone, version: 1.0.0, hash: 
0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388, length: 1682 +INFO added artifact, name: oximeter, kind: zone, version: 1.0.0, hash: 048d8fe8cdef5b175aad714d0f148aa80ce36c9114ac15ce9d02ed3d37877a77, length: 1682 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 1.0.0, hash: 89245fe2ac7e6a2ac8dfa4e7d6891a6e6df95e4141395c07c64026778f6d76d7, length: 721 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 1.0.0, hash: 5d8ea834dd6d42d386f1eb8a2c5f6e99b697c9958bb4ab8edf63e56003e25d8d, length: 775 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 1.0.0, hash: 5d8ea834dd6d42d386f1eb8a2c5f6e99b697c9958bb4ab8edf63e56003e25d8d, length: 775 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 1.0.0, hash: 18c9c774bfe4bb086e869509dcccacee8476fd87670692b765aee216f2c7f003, length: 805 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 1.0.0, hash: bf1bc1da5059f76182c3007c3049941f8898abede2f3765b106c6e7f7c42d44c, length: 740 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 1.0.0, hash: 32307d6d75c9707e8499ba4a4d379f99c0358237b6e190ff6a8024b470f62342, length: 774 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 1.0.0, hash: 32307d6d75c9707e8499ba4a4d379f99c0358237b6e190ff6a8024b470f62342, length: 774 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 1.0.0, hash: 70836d170abd5621f95bb4225987b27b3d3dd6168e73cd60e44309bdfeb94e98, length: 804 +INFO added artifact, name: installinator_document, kind: installinator_document, version: 1.0.0, hash: a6a636b5d57813578766b3f1c2559abf9af5d8c86187538167937c476beeefa3, length: 367 +set target release based on repo-1.0.0.zip + + +> # Update the install dataset on this sled to the target release. +> # (This populates the zone manifest, used for no-op conversions from +> # install dataset to artifact down the road.) +> sled-update-install-dataset serial0 --to-target-release +sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: install dataset updated: to target release (system version 1.0.0) + + +> # Simulate a mupdate on sled 0 by setting the mupdate override field to a +> # new UUID (generated using uuidgen). +> sled-set serial0 mupdate-override 2d0f6cbc-addc-47a2-962a-6a01e13376bf +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: unset -> 2d0f6cbc-addc-47a2-962a-6a01e13376bf + + +> # Generate a new inventory and plan against that. 
+> inventory-generate +generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds + +> blueprint-plan latest latest +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, new_bp_override: 2d0f6cbc-addc-47a2-962a-6a01e13376bf, prev_bp_override: None, zones: + - zone 058fd5f9-60a8-4e11-9302-15172782e17d (Crucible) left unchanged, image source: install dataset + - zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (Nexus) left unchanged, image source: install dataset + - zone 427ec88f-f467-42fa-9bbb-66a91a36103c (InternalDns) left unchanged, image source: install dataset + - zone 5199c033-4cf9-4ab6-8ae7-566bd7606363 (Crucible) left unchanged, image source: install dataset + - zone 6444f8a5-6465-4f0b-a549-1993c113569c (InternalNtp) left unchanged, image source: install dataset + - zone 803bfb63-c246-41db-b0da-d3b87ddfc63d (ExternalDns) left unchanged, image source: install dataset + - zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (CruciblePantry) left unchanged, image source: install dataset + - zone dfac80b4-a887-430a-ae87-a4e065dba787 (Crucible) left unchanged, image source: install dataset +, host_phase_2: + - host phase 2 slot A: current contents (unchanged) + - host phase 2 slot B: current contents (unchanged) + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 1, new_generation: 3 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: remove_mupdate_override is set in the blueprint (2d0f6cbc-addc-47a2-962a-6a01e13376bf) +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 +blueprint source: planner with report: +planning report: +* zone adds waiting on blockers +* zone adds and updates are blocked: + - current target release generation (2) is lower than minimum required by blueprint (3) + - sleds have remove mupdate override set in blueprint: 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 9 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 8 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 8 zones + +* zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated + + + + +> # Diff the blueprints. This diff should show "will remove mupdate override" +> # and the target release minimum generation being set. +> blueprint-diff latest +from: blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 +to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): ++ will remove mupdate override: (none) -> 2d0f6cbc-addc-47a2-962a-6a01e13376bf + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 
42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d install dataset in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) +* target release min gen: 1 -> 3 + nexus gen:::::::::::::: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # Set Nexus redundancy to 4. +> set num-nexus 4 +target number of Nexus zones: None -> 4 + + +> # Plan with the new Nexus. This will not add any Nexus zones. 
+> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: remove_mupdate_override is set in the blueprint (2d0f6cbc-addc-47a2-962a-6a01e13376bf) +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +generated blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 based on parent blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 +blueprint source: planner with report: +planning report: +* zone adds waiting on blockers +* zone adds and updates are blocked: + - current target release generation (2) is lower than minimum required by blueprint (3) + - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 9 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 8 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 8 zones + +* zone updates waiting on zone add blockers +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated + + + +> blueprint-diff latest +from: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 +to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 3 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 51 (records: 65) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # Set the add-zones-with-mupdate-override config, then do a planning run. +> # This *will* add a new Nexus zone. 
+> set planner-config --add-zones-with-mupdate-override true +planner config updated: +* add zones with mupdate override: false -> true + + +> blueprint-plan latest latest +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: remove_mupdate_override is set in the blueprint (2d0f6cbc-addc-47a2-962a-6a01e13376bf) +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a +INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b +generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 +blueprint source: planner with report: +planning report: +planner config: + add zones with mupdate override: true + +* zone adds and updates are blocked: + - current target release generation (2) is lower than minimum required by blueprint (3) + - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 + - sleds have deployment units with image sources not set to Artifact: + - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 9 zones + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 8 zones + - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 8 zones + +* adding zones despite being blocked, because: planner config `add_zones_with_mupdate_override` is true +* discretionary zones placed: + * nexus zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source install dataset +* zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: some non-Nexus zone are not yet updated + + + +> blueprint-diff latest +from: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 +to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 ++ oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_571a8adf-f0b8-458c-8e6c-5a71e82af7ae f26dc500-70ea-445e-9033-e2f494739dfc in service none none off + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + 
--------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 install dataset in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 ++ nexus 571a8adf-f0b8-458c-8e6c-5a71e82af7ae install dataset in service fd00:1122:3344:103::28 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 3 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": ++ name: 571a8adf-f0b8-458c-8e6c-5a71e82af7ae.host (records: 1) ++ AAAA fd00:1122:3344:103::28 +* name: _nexus._tcp (records: 3 -> 4) +- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal ++ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 571a8adf-f0b8-458c-8e6c-5a71e82af7ae.host.control-plane.oxide.internal + unchanged names: 50 (records: 62) + +external DNS: +* DNS zone: "oxide.example": +* name: example-silo.sys (records: 3 -> 4) +- A 192.0.2.2 +- A 192.0.2.3 +- A 192.0.2.4 ++ A 192.0.2.2 ++ A 192.0.2.3 ++ A 192.0.2.4 ++ A 192.0.2.5 + unchanged names: 4 (records: 6) + + + diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index cd6b1089043..572c9125313 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -602,11 +602,11 @@ INFO skipping noop image source check for all sleds, reason: no target release i generated blueprint 86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a blueprint source: planner with report: planning report: -* zone adds waiting on blockers * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: - sled 89d02b1b-478c-401a-8e28-7a26f74fa41b: 18 zones +* adding zones despite being blocked, because: target release generation is 1 * zone updates waiting on zone add blockers @@ -1855,12 +1855,12 @@ INFO skipping noop image source check for all sleds, reason: no target release i generated blueprint 
86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a blueprint source: planner with report: planning report: -* zone adds waiting on blockers * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: - sled 2eb69596-f081-4e2d-9425-9994926e0832: 4 zones - sled 89d02b1b-478c-401a-8e28-7a26f74fa41b: 17 zones +* adding zones despite being blocked, because: target release generation is 1 * zone updates waiting on zone add blockers diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index 1ed80aeb5c9..9d953c1aeb7 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -861,7 +861,7 @@ planner config: - sled 9dc50690-f9bf-4520-bf80-051d0f465c2c: 15 zones - sled a88790de-5962-4871-8686-61c1fd5b7094: 15 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: planner config `add_zones_with_mupdate_override` is true, target release generation is 1 * discretionary zones placed: * external_dns zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a from source install dataset * zone updates waiting on discretionary zones @@ -1370,7 +1370,7 @@ planner config: - sled 9dc50690-f9bf-4520-bf80-051d0f465c2c: 15 zones - sled a88790de-5962-4871-8686-61c1fd5b7094: 15 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: planner config `add_zones_with_mupdate_override` is true, target release generation is 1 * discretionary zones placed: * external_dns zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a from source install dataset * zone updates waiting on discretionary zones diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index a9f4bf4fc71..40c50ac072f 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -645,30 +645,19 @@ external DNS: -> # Set the add_zones_with_mupdate_override planner config to ensure that zone -> # adds happen despite zone image sources not being Artifact. -> set planner-config --add-zones-with-mupdate-override true -planner config updated: -* add zones with mupdate override: false -> true - - - > # Planning a new blueprint will now replace the expunged zone, with new records for its replacement. 
> blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 INFO skipping noop image source check for all sleds, reason: no target release is currently set generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 blueprint source: planner with report: planning report: -planner config: - add zones with mupdate override: true - * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: 15 zones - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 15 zones - sled d81c6a84-79b8-4958-ae41-ea46c9b19763: 15 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: target release generation is 1 * discretionary zones placed: * internal_dns zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source install dataset * zone updates waiting on discretionary zones diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 5bd624087e4..cb8b7f81cfb 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -2564,7 +2564,7 @@ planner config: - sleds have deployment units with image sources not set to Artifact: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 6 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: planner config `add_zones_with_mupdate_override` is true * discretionary zone placement waiting for NTP zones on sleds: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b * missing NTP zone on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b * only placed 0/1 desired nexus zones diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 4e15b736782..331835793d6 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -239,11 +239,24 @@ impl<'a> Planner<'a> { PlanningMgsUpdatesStepReport::new(PendingMgsUpdates::new()) }; - // Likewise for zone additions, unless overridden by the config. + // Likewise for zone additions, unless overridden by the config, or + // unless a target release has never been set (i.e. we're effectively in + // a pre-Nexus-driven-update world). + // + // We don't have to check for the minimum target release generation in + // this case. On a freshly-installed or MUPdated system, Nexus will find + // the mupdate overrides and clear them. The act of clearing mupdate + // overrides always sets the minimum generation to the current target + // release generation plus one, so the minimum generation will always be + // exactly 2. let add_zones_with_mupdate_override = self.input.planner_config().add_zones_with_mupdate_override; + let target_release_generation_is_one = + self.input.tuf_repo().target_release_generation + == Generation::from_u32(1); let mut add = if add_update_blocked_reasons.is_empty() || add_zones_with_mupdate_override + || target_release_generation_is_one { self.do_plan_add(&mgs_updates)? 
} else { @@ -251,6 +264,7 @@ impl<'a> Planner<'a> { }; add.add_update_blocked_reasons = add_update_blocked_reasons; add.add_zones_with_mupdate_override = add_zones_with_mupdate_override; + add.target_release_generation_is_one = target_release_generation_is_one; let zone_updates = if add.any_discretionary_zones_placed() { // Do not update any zones if we've added any discretionary zones @@ -2723,12 +2737,6 @@ pub(crate) mod test { .build(); verify_blueprint(&blueprint1); - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let input = example .system .to_planning_input_builder() @@ -2922,7 +2930,7 @@ pub(crate) mod test { // Use our example system with one sled and one Nexus instance as a // starting point. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME) .nsleds(1) .nexus_count(1) @@ -2935,12 +2943,6 @@ pub(crate) mod test { .map(|sa| sa.sled_id) .unwrap(); - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let input = example .system .to_planning_input_builder() @@ -3035,16 +3037,10 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // This blueprint should only have 3 Nexus zones: one on each sled. assert_eq!(blueprint1.sleds.len(), 3); for sled_config in blueprint1.sleds.values() { @@ -3132,16 +3128,10 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (mut example, mut blueprint1) = + let (example, mut blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - example .system .to_planning_input_builder() @@ -3286,16 +3276,10 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // Expunge the first sled we see, which will result in a Nexus external // IP no longer being associated with a running zone, and a new Nexus // zone being added to one of the two remaining sleds. 
@@ -3404,16 +3388,10 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let input = example .system .to_planning_input_builder() @@ -3602,14 +3580,9 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Create an example system with a single sled - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though zones are - // currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); let mut builder = example .system @@ -3710,16 +3683,10 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Create an example system with a single sled - let (mut example, mut blueprint1) = + let (example, mut blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let mut builder = example .system .to_planning_input_builder() @@ -3798,16 +3765,10 @@ pub(crate) mod test { // Create an example system with two sleds. We're going to expunge one // of these sleds. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(2).build(); let mut collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // The initial blueprint configuration has generation 2 let (sled_id, sled_config) = blueprint1.sleds.first_key_value().unwrap(); @@ -4168,19 +4129,13 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Create an example system with a single sled - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME) .nsleds(1) .nexus_count(2) .build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let mut builder = example .system .to_planning_input_builder() @@ -4319,16 +4274,10 @@ pub(crate) mod test { // and decommissioned sleds. (When we add more kinds of // non-provisionable states in the future, we'll have to add more // sleds.) 
- let (mut example, mut blueprint1) = + let (example, mut blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(5).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // This blueprint should only have 5 Nexus zones: one on each sled. assert_eq!(blueprint1.sleds.len(), 5); for sled_config in blueprint1.sleds.values() { @@ -4654,15 +4603,9 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let collection = example.collection; // Expunge one of the sleds. @@ -4947,16 +4890,10 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // We should start with CRUCIBLE_PANTRY_REDUNDANCY pantries spread out // to at most 1 per sled. Find one of the sleds running one. let pantry_sleds = blueprint1 @@ -5032,16 +4969,10 @@ pub(crate) mod test { let logctx = test_setup_log(TEST_NAME); // Use our example system as a starting point. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // We should start with one ClickHouse zone. Find out which sled it's on. let clickhouse_sleds = blueprint1 .all_omicron_zones(BlueprintZoneDisposition::any) @@ -5113,17 +5044,11 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME).build(); let mut collection = example.collection; verify_blueprint(&blueprint1); - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // We shouldn't have a clickhouse cluster config, as we don't have a // clickhouse policy set yet assert!(blueprint1.clickhouse_cluster_config.is_none()); @@ -5471,16 +5396,10 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. 
- let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&log, TEST_NAME).build(); let mut collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let mut input_builder = example .system .to_planning_input_builder() @@ -5702,16 +5621,10 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&log, TEST_NAME).build(); let collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - let mut input_builder = example .system .to_planning_input_builder() @@ -6098,16 +6011,10 @@ pub(crate) mod test { let log = logctx.log.clone(); // Use our example system. - let (mut example, blueprint1) = + let (example, blueprint1) = ExampleSystemBuilder::new(&log, TEST_NAME).build(); let mut collection = example.collection; - // Set this chicken switch so that zones are added even though image - // sources are currently InstallDataset. - let mut config = example.system.get_planner_config(); - config.add_zones_with_mupdate_override = true; - example.system.set_planner_config(config); - // Find a internal DNS zone we'll use for our test. let (sled_id, internal_dns_config) = blueprint1 .sleds diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index fe3bed2dc01..4ea7fefc613 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -328,15 +328,12 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd blueprint source: planner with report: planning report: -planner config: - add zones with mupdate override: true - * zone adds and updates are blocked: - sleds have deployment units with image sources not set to Artifact: - sled d67ce8f0-a691-4010-b414-420d82e80527: 14 zones - sled fefcf4cf-f7e7-46b3-b629-058526ce440e: 14 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: target release generation is 1 * discretionary zones placed: * crucible_pantry zone on sled d67ce8f0-a691-4010-b414-420d82e80527 from source install dataset * nexus zone on sled d67ce8f0-a691-4010-b414-420d82e80527 from source install dataset diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 558b3553ca3..9c79eb229be 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -516,16 +516,13 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b blueprint source: planner with report: planning report: -planner config: - add zones with mupdate override: true - * zone adds and updates are blocked: - sleds have deployment units with image sources not set to 
Artifact: - sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: 15 zones - sled 75bc286f-2b4b-482c-9431-59272af529da: 12 zones - sled affab35f-600a-4109-8ea0-34a067a4e0bc: 12 zones -* adding zones despite being blocked, as specified by the `add_zones_with_mupdate_override` planner config option +* adding zones despite being blocked, because: target release generation is 1 * discretionary zones placed: * nexus zone on sled 75bc286f-2b4b-482c-9431-59272af529da from source install dataset * nexus zone on sled 75bc286f-2b4b-482c-9431-59272af529da from source install dataset diff --git a/nexus/src/app/background/tasks/blueprint_planner.rs b/nexus/src/app/background/tasks/blueprint_planner.rs index b933ee55e8c..2bb0e33db2e 100644 --- a/nexus/src/app/background/tasks/blueprint_planner.rs +++ b/nexus/src/app/background/tasks/blueprint_planner.rs @@ -359,14 +359,7 @@ mod test { version: 1, config: ReconfiguratorConfig { planner_enabled: true, - planner_config: PlannerConfig { - // Set this config to true because we'd like to test - // adding zones even if no target release is set. In the - // future, we'll allow adding zones if no target release - // has ever been set, in which case we can go back to - // setting this field to false. - add_zones_with_mupdate_override: true, - }, + planner_config: PlannerConfig::default(), }, time_modified: now_db_precision(), }), diff --git a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index d4c9ac4c33a..c69368da03f 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -563,6 +563,10 @@ pub struct PlanningAddStepReport { /// MUPdate-related reasons.) pub add_zones_with_mupdate_override: bool, + /// Set to true if the target release generation is 1, which would allow + /// zones to be added. 
+ pub target_release_generation_is_one: bool, + pub sleds_without_ntp_zones_in_inventory: BTreeSet, pub sleds_without_zpools_for_ntp_zones: BTreeSet, pub sleds_waiting_for_ntp_zone: BTreeSet, @@ -590,6 +594,7 @@ impl PlanningAddStepReport { waiting_on: None, add_update_blocked_reasons: Vec::new(), add_zones_with_mupdate_override: false, + target_release_generation_is_one: false, sleds_without_ntp_zones_in_inventory: BTreeSet::new(), sleds_without_zpools_for_ntp_zones: BTreeSet::new(), sleds_waiting_for_ntp_zone: BTreeSet::new(), @@ -689,6 +694,7 @@ impl fmt::Display for PlanningAddStepReport { waiting_on, add_update_blocked_reasons, add_zones_with_mupdate_override, + target_release_generation_is_one, sleds_without_ntp_zones_in_inventory, sleds_without_zpools_for_ntp_zones, sleds_waiting_for_ntp_zone, @@ -718,12 +724,21 @@ impl fmt::Display for PlanningAddStepReport { } } + let mut add_zones_despite_being_blocked_reasons = Vec::new(); if *add_zones_with_mupdate_override { + add_zones_despite_being_blocked_reasons.push( + "planner config `add_zones_with_mupdate_override` is true", + ); + } + if *target_release_generation_is_one { + add_zones_despite_being_blocked_reasons + .push("target release generation is 1"); + } + if !add_zones_despite_being_blocked_reasons.is_empty() { writeln!( f, - "* adding zones despite being blocked, \ - as specified by the `add_zones_with_mupdate_override` \ - planner config option" + "* adding zones despite being blocked, because: {}", + add_zones_despite_being_blocked_reasons.join(", "), )?; } diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index a2320acb492..c88486f9442 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -4289,6 +4289,10 @@ "$ref": "#/components/schemas/PlanningAddSufficientZonesExist" } }, + "target_release_generation_is_one": { + "description": "Set to true if the target release generation is 1, which would allow zones to be added.", + "type": "boolean" + }, "waiting_on": { "nullable": true, "description": "What are we waiting on to start zone additions?", @@ -4310,7 +4314,8 @@ "sleds_waiting_for_ntp_zone", "sleds_without_ntp_zones_in_inventory", "sleds_without_zpools_for_ntp_zones", - "sufficient_zones_exist" + "sufficient_zones_exist", + "target_release_generation_is_one" ] }, "PlanningAddSufficientZonesExist": { From 19b9f167c7cf4b15f50af48672d68302715fb9a2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 24 Sep 2025 14:18:51 -0700 Subject: [PATCH 14/18] [nexus] Auto-update schema on boot (#9030) Fixes https://github.com/oxidecomputer/omicron/issues/8912 Should be merged after the rest of Nexus quiesce/handoff is complete. --- smf/nexus/multi-sled/config-partial.toml | 9 ++++----- smf/nexus/single-sled/config-partial.toml | 9 ++++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml index b0a7c1076dd..5548c926122 100644 --- a/smf/nexus/multi-sled/config-partial.toml +++ b/smf/nexus/multi-sled/config-partial.toml @@ -18,11 +18,10 @@ mode = "file" path = "/dev/stdout" if_exists = "append" -# TODO: Uncomment the following lines to enable automatic schema -# migration on boot. -# -# [schema] -# schema_dir = "/var/nexus/schema/crdb" +# This allows Nexus to see the CRDB schema, and apply schema updates when it +# boots. 
+[schema] +schema_dir = "/var/nexus/schema/crdb" [background_tasks] dns_internal.period_secs_config = 60 diff --git a/smf/nexus/single-sled/config-partial.toml b/smf/nexus/single-sled/config-partial.toml index 888c4a06454..005a4f83dbb 100644 --- a/smf/nexus/single-sled/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -18,11 +18,10 @@ mode = "file" path = "/dev/stdout" if_exists = "append" -# TODO: Uncomment the following lines to enable automatic schema -# migration on boot. -# -# [schema] -# schema_dir = "/var/nexus/schema/crdb" +# This allows Nexus to see the CRDB schema, and apply schema updates when it +# boots. +[schema] +schema_dir = "/var/nexus/schema/crdb" [background_tasks] dns_internal.period_secs_config = 60 From ef614ba3b6de473954db797cccd4f67539367355 Mon Sep 17 00:00:00 2001 From: iliana etaoin Date: Wed, 24 Sep 2025 14:33:50 -0700 Subject: [PATCH 15/18] fix nexus-lockstep semantic merge conflict, #9037 vs #9063 (#9077) --- openapi/nexus-lockstep.json | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/openapi/nexus-lockstep.json b/openapi/nexus-lockstep.json index 8dbc8b3c484..a03852f4d5a 100644 --- a/openapi/nexus-lockstep.json +++ b/openapi/nexus-lockstep.json @@ -7087,6 +7087,21 @@ "type" ] }, + { + "description": "Waiting on zones to propagate to inventory.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "inventory_propagation" + ] + } + }, + "required": [ + "type" + ] + }, { "description": "Waiting on updates to RoT / SP / Host OS / etc.", "type": "object", From c6e0b77c19f842ae483ee73c17b1af96bf092ac9 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Wed, 24 Sep 2025 18:17:10 -0400 Subject: [PATCH 16/18] PlanningReport: Use JSON-safe map keys (#9080) (This also includes #9077 to avoid failing CI.) 
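For context on "JSON-safe map keys": serde_json only serializes maps whose keys render as JSON strings, so a report map keyed by a whole zone-config struct cannot be serialized, while a map keyed by the zone ID can. A minimal sketch of that behavior follows; `FakeZoneConfig` and the literal values are invented for illustration and are not types from this patch.

    // Minimal illustration (invented types; not code from this patch).
    use serde::Serialize;
    use std::collections::BTreeMap;

    #[derive(Serialize, PartialEq, Eq, PartialOrd, Ord)]
    struct FakeZoneConfig {
        id: String,
        kind: String,
    }

    fn main() {
        let zone = FakeZoneConfig { id: "0c71b3b2".into(), kind: "nexus".into() };

        // Keyed by a struct: serde_json rejects this, because the key would
        // have to serialize as a JSON object rather than a string.
        let mut by_config = BTreeMap::new();
        by_config.insert(zone, "image out-of-date");
        assert!(serde_json::to_string(&by_config).is_err());

        // Keyed by the zone ID (a string): serializes cleanly.
        let mut by_id = BTreeMap::new();
        by_id.insert("0c71b3b2".to_string(), "nexus image out-of-date");
        assert!(serde_json::to_string(&by_id).is_ok());
    }

This is presumably also why the background task below switches from `json!(...)`, which panics if serialization fails, to `serde_json::to_value` with an explicit error path.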
--------- Co-authored-by: iliana etaoin --- .../tests/output/cmds-target-release-stdout | 12 +++---- .../app/background/tasks/blueprint_planner.rs | 12 ++++++- nexus/types/src/deployment/planning_report.rs | 34 +++++++------------ 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index b9b99c77fce..4eeec8d4ec8 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -7944,9 +7944,9 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 3 remaining out-of-date zones * 3 zones waiting to be expunged: - * zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus): image out-of-date, but zone's nexus_generation 1 is still active - * zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus): image out-of-date, but zone's nexus_generation 1 is still active - * zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus): image out-of-date, but zone's nexus_generation 1 is still active + * zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038: nexus image out-of-date, but nexus_generation 1 is still active + * zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6: nexus image out-of-date, but nexus_generation 1 is still active + * zone 466a9f29-62bf-4e63-924a-b9efdb86afec: nexus image out-of-date, but nexus_generation 1 is still active * updating top-level nexus_generation to: 2 @@ -8035,9 +8035,9 @@ planning report: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 9 zones are already from artifacts * 3 remaining out-of-date zones * 3 zones waiting to be expunged: - * zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (nexus): image out-of-date, but zone's nexus_generation 1 is still active - * zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (nexus): image out-of-date, but zone's nexus_generation 1 is still active - * zone 466a9f29-62bf-4e63-924a-b9efdb86afec (nexus): image out-of-date, but zone's nexus_generation 1 is still active + * zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038: nexus image out-of-date, but nexus_generation 1 is still active + * zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6: nexus image out-of-date, but nexus_generation 1 is still active + * zone 466a9f29-62bf-4e63-924a-b9efdb86afec: nexus image out-of-date, but nexus_generation 1 is still active diff --git a/nexus/src/app/background/tasks/blueprint_planner.rs b/nexus/src/app/background/tasks/blueprint_planner.rs index 2bb0e33db2e..00aad12eae6 100644 --- a/nexus/src/app/background/tasks/blueprint_planner.rs +++ b/nexus/src/app/background/tasks/blueprint_planner.rs @@ -22,6 +22,7 @@ use omicron_common::api::external::LookupType; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::GenericUuid as _; use serde_json::json; +use slog_error_chain::InlineErrorChain; use std::sync::Arc; use tokio::sync::watch::{self, Receiver, Sender}; @@ -292,7 +293,16 @@ impl BackgroundTask for BlueprintPlanner { &'a mut self, opctx: &'a OpContext, ) -> BoxFuture<'a, serde_json::Value> { - Box::pin(async move { json!(self.plan(opctx).await) }) + Box::pin(async move { + let status = self.plan(opctx).await; + match serde_json::to_value(status) { + Ok(val) => val, + Err(err) => json!({ + "error": format!("could not serialize task status: {}", + InlineErrorChain::new(&err)), + }), + } + }) } } diff --git 
a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index c69368da03f..159cd86c4d3 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -848,8 +848,8 @@ pub struct PlanningZoneUpdatesStepReport { pub out_of_date_zones: BTreeMap>, pub expunged_zones: BTreeMap>, pub updated_zones: BTreeMap>, - pub unsafe_zones: BTreeMap, - pub waiting_zones: BTreeMap, + pub unsafe_zones: BTreeMap, + pub waiting_zones: BTreeMap, } impl PlanningZoneUpdatesStepReport { @@ -934,7 +934,7 @@ impl PlanningZoneUpdatesStepReport { zone: &BlueprintZoneConfig, reason: ZoneUnsafeToShutdown, ) { - self.unsafe_zones.insert(zone.clone(), reason); + self.unsafe_zones.insert(zone.id, reason); } pub fn waiting_zone( @@ -942,7 +942,7 @@ impl PlanningZoneUpdatesStepReport { zone: &BlueprintZoneConfig, reason: ZoneWaitingToExpunge, ) { - self.waiting_zones.insert(zone.clone(), reason); + self.waiting_zones.insert(zone.id, reason); } } @@ -1001,28 +1001,16 @@ impl fmt::Display for PlanningZoneUpdatesStepReport { if !unsafe_zones.is_empty() { let (n, s) = plural_map(unsafe_zones); writeln!(f, "* {n} zone{s} not ready to shut down safely:")?; - for (zone, reason) in unsafe_zones.iter() { - writeln!( - f, - " * zone {} ({}): {}", - zone.id, - zone.zone_type.kind().report_str(), - reason, - )?; + for (zone_id, reason) in unsafe_zones.iter() { + writeln!(f, " * zone {zone_id}: {reason}")?; } } if !waiting_zones.is_empty() { let (n, s) = plural_map(waiting_zones); writeln!(f, "* {n} zone{s} waiting to be expunged:")?; - for (zone, reason) in waiting_zones.iter() { - writeln!( - f, - " * zone {} ({}): {}", - zone.id, - zone.zone_type.kind().report_str(), - reason, - )?; + for (zone_id, reason) in waiting_zones.iter() { + writeln!(f, " * zone {zone_id}: {reason}")?; } } @@ -1076,7 +1064,9 @@ pub enum ZoneUnsafeToShutdown { impl fmt::Display for ZoneUnsafeToShutdown { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Self::Cockroachdb { reason } => write!(f, "{reason}"), + Self::Cockroachdb { reason } => { + write!(f, "cockroach unsafe to shut down: {reason}") + } Self::BoundaryNtp { total_boundary_ntp_zones: t, synchronized_count: s, @@ -1107,7 +1097,7 @@ impl fmt::Display for ZoneWaitingToExpunge { Self::Nexus { zone_generation } => { write!( f, - "image out-of-date, but zone's nexus_generation \ + "nexus image out-of-date, but nexus_generation \ {zone_generation} is still active" ) } From 092f52f72f2fadcd98b43733cd7079362609b9e8 Mon Sep 17 00:00:00 2001 From: Rain Date: Wed, 24 Sep 2025 16:01:17 -0700 Subject: [PATCH 17/18] [reconfigurator-planning] clear remove-mupdate-override from blueprint even if no target release is set (#9082) Currently: * if a target release is set, we go ahead and clear the remove-mupdate-override instruction from blueprints, regardless of whether artifacts match * if no target release is set, we don't do that This behavior is inconsistent. We shouldn't gate the mupdate override part of the state machine on a target release not being set. 
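The shape of the change, sketched with invented names (this is not the planner's real API): whether to clear a sled's mupdate override should follow inventory alone, not whether a target release happens to be set.

    // Illustrative sketch only; `PlannerState` and both functions are invented.
    struct PlannerState {
        target_release_set: bool,
        override_gone_from_inventory: bool,
    }

    // Previous shape: the override was only cleared once a target release existed.
    fn clear_override_before(s: &PlannerState) -> bool {
        s.target_release_set && s.override_gone_from_inventory
    }

    // New shape: follow the inventory regardless of the target release.
    fn clear_override_after(s: &PlannerState) -> bool {
        s.override_gone_from_inventory
    }

    fn main() {
        // A freshly MUPdated system with no target release set yet.
        let s = PlannerState {
            target_release_set: false,
            override_gone_from_inventory: true,
        };
        assert!(!clear_override_before(&s)); // stuck: override never cleared
        assert!(clear_override_after(&s));   // cleared, as the new test expects
    }
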
--- ...update-override-without-target-release.txt | 24 ++ ...mds-add-zones-with-mupdate-override-stdout | 12 +- ...ate-override-without-target-release-stderr | 0 ...ate-override-without-target-release-stdout | 353 ++++++++++++++++++ .../planning/src/blueprint_builder/builder.rs | 10 - .../src/blueprint_editor/sled_editor.rs | 14 +- openapi/nexus-lockstep.json | 7 +- 7 files changed, 403 insertions(+), 17 deletions(-) create mode 100644 dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-override-without-target-release.txt create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-override-without-target-release-stderr create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-override-without-target-release-stdout diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-override-without-target-release.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-override-without-target-release.txt new file mode 100644 index 00000000000..01341f15ced --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-override-without-target-release.txt @@ -0,0 +1,24 @@ +# Test clearing the mupdate override if a target release isn't set. +load-example --nsleds 1 --ndisks-per-sled 3 + +# Create a TUF repository from a fake manifest. We don't set this as the +# target release; rather, we just MUPdate to it (also setting the mupdate +# override). +tuf-assemble ../../update-common/manifests/fake.toml +sled-update-install-dataset serial0 --from-repo repo-1.0.0.zip +sled-set serial0 mupdate-override 6123eac1-ec5b-42ba-b73f-9845105a9971 + +# Generate a new inventory and plan against that. This diff will have the +# "will remove mupdate override" message within it. +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Now apply the remove-mupdate-override instruction to the sled. +sled-set serial0 mupdate-override unset + +# Plan again. This diff should show that the remove-mupdate-override instruction +# was removed from the blueprint. 
+inventory-generate +blueprint-plan latest latest +blueprint-diff latest diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout index 1ee59db0ce8..040173296fc 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-zones-with-mupdate-override-stdout @@ -194,7 +194,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -255,7 +255,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS: DNS zone: "control-plane.oxide.internal" (unchanged) - unchanged names: 51 (records: 65) + unchanged names: 52 (records: 68) external DNS: DNS zone: "oxide.example" (unchanged) @@ -386,6 +386,14 @@ internal DNS: * DNS zone: "control-plane.oxide.internal": + name: 571a8adf-f0b8-458c-8e6c-5a71e82af7ae.host (records: 1) + AAAA fd00:1122:3344:103::28 +* name: _nexus-lockstep._tcp (records: 3 -> 4) +- SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12232 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal ++ SRV port 12232 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12232 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12232 571a8adf-f0b8-458c-8e6c-5a71e82af7ae.host.control-plane.oxide.internal * name: _nexus._tcp (records: 3 -> 4) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-override-without-target-release-stderr b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-override-without-target-release-stderr new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-override-without-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-override-without-target-release-stdout new file mode 100644 index 00000000000..40834f677b5 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-override-without-target-release-stdout @@ -0,0 +1,353 @@ +using provided RNG seed: reconfigurator-cli-test +> # Test clearing the mupdate override if a target release isn't set. +> load-example --nsleds 1 --ndisks-per-sled 3 +loaded example system with: +- collection: f45ba181-4b56-42cc-a762-874d90184a43 +- blueprint: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 + + +> # Create a TUF repository from a fake manifest. We don't set this as the +> # target release; rather, we just MUPdate to it (also setting the mupdate +> # override). 
+> tuf-assemble ../../update-common/manifests/fake.toml +INFO assembling repository in +INFO artifacts assembled and archived to `repo-1.0.0.zip`, component: OmicronRepoAssembler +created repo-1.0.0.zip for system version 1.0.0 + +> sled-update-install-dataset serial0 --from-repo repo-1.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 1.0.0, hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, length: 732 +INFO added artifact, name: fake-rot, kind: gimlet_rot_image_a, version: 1.0.0, hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, length: 783 +INFO added artifact, name: fake-rot, kind: gimlet_rot_image_b, version: 1.0.0, hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, length: 783 +INFO added artifact, name: fake-rot-bootloader, kind: gimlet_rot_bootloader, version: 1.0.0, hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, length: 797 +INFO added artifact, name: fake-host, kind: gimlet_host_phase_1, version: 1.0.0, hash: b99d5273ba1418bebb19d74b701d716896409566d41de76ada71bded4c9b166b, length: 524288 +INFO added artifact, name: fake-host, kind: cosmo_host_phase_1, version: 1.0.0, hash: 9525f567106549a3fc32df870a74803d77e51dcc44190b218e227a2c5d444f58, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 1.0.0, hash: d944ae205b61ccf4322448f7d0311a819c53d9844769de066c5307c1682abb47, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: gimlet_trampoline_phase_1, version: 1.0.0, hash: bcb27520ee5a56e19f6df9662c66d69ac681fbd873a97547be5f6a5ae3d250c4, length: 524288 +INFO added artifact, name: fake-trampoline, kind: cosmo_trampoline_phase_1, version: 1.0.0, hash: e235b8afb58ee69d966853bd5efe7c7e904da84b9035a332b3e691dc1d5cdbd0, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 1.0.0, hash: a5dfcc4bc69b791f1c509df499e9e72cce844cb2b53a56d8bb357b264bdf13b6, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 1.0.0, hash: 52b1eb4daff6f9140491d547b11248392920230db3db0eef5f5fa5333fe9e659, length: 1686 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 1.0.0, hash: cda702919449d86663be97295043aeca0ead69ae5db3bbdb20053972254a27a3, length: 1690 +INFO added artifact, name: clickhouse_server, kind: zone, version: 1.0.0, hash: 5f9ae6a9821bbe8ff0bf60feddf8b167902fe5f3e2c98bd21edd1ec9d969a001, length: 1690 +INFO added artifact, name: cockroachdb, kind: zone, version: 1.0.0, hash: f3a1a3c0b3469367b005ee78665d982059d5e14e93a479412426bf941c4ed291, length: 1689 +INFO added artifact, name: crucible-zone, kind: zone, version: 1.0.0, hash: 6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047, length: 1690 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 1.0.0, hash: 21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02, length: 1695 +INFO added artifact, name: external-dns, kind: zone, version: 1.0.0, hash: ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d, length: 1689 +INFO added artifact, name: internal-dns, kind: zone, version: 1.0.0, hash: ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a, length: 1689 +INFO added artifact, name: ntp, kind: zone, version: 1.0.0, hash: 67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439, length: 1681 +INFO added artifact, name: nexus, kind: zone, version: 
1.0.0, hash: 0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388, length: 1682 +INFO added artifact, name: oximeter, kind: zone, version: 1.0.0, hash: 048d8fe8cdef5b175aad714d0f148aa80ce36c9114ac15ce9d02ed3d37877a77, length: 1682 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 1.0.0, hash: 89245fe2ac7e6a2ac8dfa4e7d6891a6e6df95e4141395c07c64026778f6d76d7, length: 721 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 1.0.0, hash: 5d8ea834dd6d42d386f1eb8a2c5f6e99b697c9958bb4ab8edf63e56003e25d8d, length: 775 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 1.0.0, hash: 5d8ea834dd6d42d386f1eb8a2c5f6e99b697c9958bb4ab8edf63e56003e25d8d, length: 775 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 1.0.0, hash: 18c9c774bfe4bb086e869509dcccacee8476fd87670692b765aee216f2c7f003, length: 805 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 1.0.0, hash: bf1bc1da5059f76182c3007c3049941f8898abede2f3765b106c6e7f7c42d44c, length: 740 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 1.0.0, hash: 32307d6d75c9707e8499ba4a4d379f99c0358237b6e190ff6a8024b470f62342, length: 774 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 1.0.0, hash: 32307d6d75c9707e8499ba4a4d379f99c0358237b6e190ff6a8024b470f62342, length: 774 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 1.0.0, hash: 70836d170abd5621f95bb4225987b27b3d3dd6168e73cd60e44309bdfeb94e98, length: 804 +INFO added artifact, name: installinator_document, kind: installinator_document, version: 1.0.0, hash: a6a636b5d57813578766b3f1c2559abf9af5d8c86187538167937c476beeefa3, length: 367 +sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: install dataset updated: from repo at repo-1.0.0.zip (system version 1.0.0) + +> sled-set serial0 mupdate-override 6123eac1-ec5b-42ba-b73f-9845105a9971 +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: unset -> 6123eac1-ec5b-42ba-b73f-9845105a9971 + + +> # Generate a new inventory and plan against that. This diff will have the +> # "will remove mupdate override" message within it. 
+> inventory-generate +generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds + +> blueprint-plan latest latest +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, new_bp_override: 6123eac1-ec5b-42ba-b73f-9845105a9971, prev_bp_override: None, zones: + - zone 005548be-c5e4-49ff-a27b-88f314f1bc51 (ExternalDns) left unchanged, image source: install dataset + - zone 10676bfe-b61f-40e1-bc07-a4cb76ea1f30 (Clickhouse) left unchanged, image source: install dataset + - zone 2205353a-e1d2-48ff-863b-9d6b1487d474 (ExternalDns) left unchanged, image source: install dataset + - zone 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4 (Nexus) left unchanged, image source: install dataset + - zone 2de1c525-4e04-4ba6-ac6b-377aaa542f96 (CruciblePantry) left unchanged, image source: install dataset + - zone 31b26053-8b94-4d8c-9e4a-d7d720afe265 (CruciblePantry) left unchanged, image source: install dataset + - zone 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe (Nexus) left unchanged, image source: install dataset + - zone 3d9b7487-d7b9-4e25-960f-f2086f3e2919 (Crucible) left unchanged, image source: install dataset + - zone 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035 (InternalDns) left unchanged, image source: install dataset + - zone 45ce130e-c5ac-4e26-ab73-7589ba634418 (InternalDns) left unchanged, image source: install dataset + - zone 4b19d194-8b25-4396-88da-3df1b3788601 (Crucible) left unchanged, image source: install dataset + - zone 70aab480-3d6c-47d5-aaf9-8d2ddab2931c (ExternalDns) left unchanged, image source: install dataset + - zone 9b135c74-c09a-4dcc-ba19-f6f8deae135a (InternalNtp) left unchanged, image source: install dataset + - zone a7b7bfbe-0588-4781-9a5e-fba63584e5d2 (InternalDns) left unchanged, image source: install dataset + - zone c204730a-0946-4793-a470-64c88e89da96 (Crucible) left unchanged, image source: install dataset + - zone c2cbbf34-b852-4164-a572-01d4d79445a1 (Nexus) left unchanged, image source: install dataset + - zone f87ada51-4419-4144-86a8-e5e4ff0f64d3 (CruciblePantry) left unchanged, image source: install dataset +, host_phase_2: + - host phase 2 slot A: current contents (unchanged) + - host phase 2 slot B: current contents (unchanged) + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 1, new_generation: 2 +INFO skipping noop image source check for all sleds, reason: no target release is currently set +generated blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 based on parent blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 +blueprint source: planner with report: +planning report: +* zone adds and updates are blocked: + - current target release generation (1) is lower than minimum required by blueprint (2) + - sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 + - sleds have deployment units with image sources not set to Artifact: + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 17 zones + +* adding zones despite being blocked, because: target release generation is 1 +* zone updates waiting on zone add blockers + + + +> blueprint-diff latest +from: blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 +to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 + + MODIFIED SLEDS: + + sled 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): ++ will remove mupdate override: (none) -> 6123eac1-ec5b-42ba-b73f-9845105a9971 + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 8355e747-b375-4d9e-b5da-03d9540ab5cd in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible d4f66eab-2870-4a74-b4d5-ae5da3276682 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible c6d7ecb4-135c-4f09-8948-b73fec13db76 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/clickhouse 53e5b466-aac5-4cf0-a93b-9f68f58ac755 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 66788e15-bb3a-4369-a5cb-3357f7b85f64 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns d88eb02f-b66d-4ae7-91a1-23154417c1f1 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/external_dns ebfd425c-4e5d-4385-a105-8f2ce32fa9ec in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ac802759-2ac3-4c5c-b8e7-6d51689e1537 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 6b672182-6d17-41d5-81d7-19debe7d3f9b in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/internal_dns b1007e3d-7aaf-4ccb-9773-241dcbf79b21 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone e4b83186-c6e2-4d33-ae1b-803c32d1a86e in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone f306ebde-464c-49ef-94eb-1d72cf9afc7e in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 20cb5d98-bcd7-4a11-9190-7570a0aa6fe0 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_clickhouse_10676bfe-b61f-40e1-bc07-a4cb76ea1f30 647f344c-b4c5-4788-a732-40658e852f63 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_3d9b7487-d7b9-4e25-960f-f2086f3e2919 f822a7e2-28aa-4ae2-ba86-d4e552a15bcb in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_4b19d194-8b25-4396-88da-3df1b3788601 bef875a9-3cf1-4b02-9682-c0286fb23d4d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_c204730a-0946-4793-a470-64c88e89da96 c7e90d9c-22db-43ca-a602-2a295bac7eec in service none none off + 
oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_pantry_2de1c525-4e04-4ba6-ac6b-377aaa542f96 0c7e4b2a-502a-4f4c-8ac4-4068db25252e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_31b26053-8b94-4d8c-9e4a-d7d720afe265 49dc7c86-c865-457f-8d58-37b92b002691 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_f87ada51-4419-4144-86a8-e5e4ff0f64d3 d3cd3ec4-703f-4e73-8cc2-b4bc19c7d596 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_005548be-c5e4-49ff-a27b-88f314f1bc51 506ddf0e-8cbf-44a9-93d7-130a550fd42d in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_external_dns_2205353a-e1d2-48ff-863b-9d6b1487d474 382cbbf2-8b08-4388-839a-b43b8bc82999 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_70aab480-3d6c-47d5-aaf9-8d2ddab2931c d1611973-b22d-4097-b6fd-0a480f2199a5 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_3fd06852-06cb-4d8a-b4b2-eb88ff5a6035 8b89525f-0764-4937-9fa8-022242647c0f in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_45ce130e-c5ac-4e26-ab73-7589ba634418 8144f1ee-dbef-45c1-9397-2031a9ca8b38 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_internal_dns_a7b7bfbe-0588-4781-9a5e-fba63584e5d2 3523db58-10ab-4e2c-95d9-fec43036c0f5 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_nexus_2bc0b0c4-63a9-44cc-afff-76ce645ef1d4 aa860565-2bbb-4a70-ae8e-7d8f29f1a536 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe 4d322f8a-d461-4ead-995b-9ba7d76becad in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_c2cbbf34-b852-4164-a572-01d4d79445a1 2aeeb036-0e23-419f-82f7-9ab04c06d7ec in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_9b135c74-c09a-4dcc-ba19-f6f8deae135a f4ea4a15-b3b3-4a67-b61a-8fba7c7ab61f in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 38fafd69-0275-4f4d-885d-3bbf2ea77551 in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cb8d5371-640d-4364-8a5e-0fde125d28af in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug ba445402-2775-4f97-87e9-640be3a00c6f in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 10676bfe-b61f-40e1-bc07-a4cb76ea1f30 install dataset in service fd00:1122:3344:101::25 + crucible 3d9b7487-d7b9-4e25-960f-f2086f3e2919 install dataset in service fd00:1122:3344:101::2e + crucible 4b19d194-8b25-4396-88da-3df1b3788601 install dataset in service fd00:1122:3344:101::2d + crucible c204730a-0946-4793-a470-64c88e89da96 install dataset in service fd00:1122:3344:101::2c + crucible_pantry 2de1c525-4e04-4ba6-ac6b-377aaa542f96 install dataset in service fd00:1122:3344:101::2b + crucible_pantry 31b26053-8b94-4d8c-9e4a-d7d720afe265 install dataset in service fd00:1122:3344:101::29 + crucible_pantry f87ada51-4419-4144-86a8-e5e4ff0f64d3 install dataset in service 
fd00:1122:3344:101::2a + external_dns 005548be-c5e4-49ff-a27b-88f314f1bc51 install dataset in service fd00:1122:3344:101::27 + external_dns 2205353a-e1d2-48ff-863b-9d6b1487d474 install dataset in service fd00:1122:3344:101::28 + external_dns 70aab480-3d6c-47d5-aaf9-8d2ddab2931c install dataset in service fd00:1122:3344:101::26 + internal_dns 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035 install dataset in service fd00:1122:3344:2::1 + internal_dns 45ce130e-c5ac-4e26-ab73-7589ba634418 install dataset in service fd00:1122:3344:1::1 + internal_dns a7b7bfbe-0588-4781-9a5e-fba63584e5d2 install dataset in service fd00:1122:3344:3::1 + internal_ntp 9b135c74-c09a-4dcc-ba19-f6f8deae135a install dataset in service fd00:1122:3344:101::21 + nexus 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4 install dataset in service fd00:1122:3344:101::24 + nexus 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe install dataset in service fd00:1122:3344:101::23 + nexus c2cbbf34-b852-4164-a572-01d4d79445a1 install dataset in service fd00:1122:3344:101::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) +* target release min gen: 1 -> 2 + nexus gen:::::::::::::: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 36 (records: 48) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + + +> # Now apply the remove-mupdate-override instruction to the sled. +> sled-set serial0 mupdate-override unset +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: 6123eac1-ec5b-42ba-b73f-9845105a9971 -> unset + + +> # Plan again. This diff should show that the remove-mupdate-override instruction +> # was removed from the blueprint. 
+> inventory-generate +generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds + +> blueprint-plan latest latest +INFO inventory override no longer exists, blueprint override cleared, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, prev_bp_override: 6123eac1-ec5b-42ba-b73f-9845105a9971 +INFO skipping noop image source check for all sleds, reason: no target release is currently set +generated blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 based on parent blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 +blueprint source: planner with report: +planning report: +* zone adds and updates are blocked: + - current target release generation (1) is lower than minimum required by blueprint (2) + - sleds have deployment units with image sources not set to Artifact: + - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: 17 zones + +* adding zones despite being blocked, because: target release generation is 1 +* zone updates waiting on zone add blockers + + + +> blueprint-diff latest +from: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 +to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 3 -> 4): +- will remove mupdate override: 6123eac1-ec5b-42ba-b73f-9845105a9971 -> (none) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 8355e747-b375-4d9e-b5da-03d9540ab5cd in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible d4f66eab-2870-4a74-b4d5-ae5da3276682 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible c6d7ecb4-135c-4f09-8948-b73fec13db76 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/clickhouse 53e5b466-aac5-4cf0-a93b-9f68f58ac755 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 66788e15-bb3a-4369-a5cb-3357f7b85f64 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns d88eb02f-b66d-4ae7-91a1-23154417c1f1 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/external_dns ebfd425c-4e5d-4385-a105-8f2ce32fa9ec in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ac802759-2ac3-4c5c-b8e7-6d51689e1537 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 6b672182-6d17-41d5-81d7-19debe7d3f9b in service none none off + 
oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/internal_dns b1007e3d-7aaf-4ccb-9773-241dcbf79b21 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone e4b83186-c6e2-4d33-ae1b-803c32d1a86e in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone f306ebde-464c-49ef-94eb-1d72cf9afc7e in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 20cb5d98-bcd7-4a11-9190-7570a0aa6fe0 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_clickhouse_10676bfe-b61f-40e1-bc07-a4cb76ea1f30 647f344c-b4c5-4788-a732-40658e852f63 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_3d9b7487-d7b9-4e25-960f-f2086f3e2919 f822a7e2-28aa-4ae2-ba86-d4e552a15bcb in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_4b19d194-8b25-4396-88da-3df1b3788601 bef875a9-3cf1-4b02-9682-c0286fb23d4d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_c204730a-0946-4793-a470-64c88e89da96 c7e90d9c-22db-43ca-a602-2a295bac7eec in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_pantry_2de1c525-4e04-4ba6-ac6b-377aaa542f96 0c7e4b2a-502a-4f4c-8ac4-4068db25252e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_31b26053-8b94-4d8c-9e4a-d7d720afe265 49dc7c86-c865-457f-8d58-37b92b002691 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_f87ada51-4419-4144-86a8-e5e4ff0f64d3 d3cd3ec4-703f-4e73-8cc2-b4bc19c7d596 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_005548be-c5e4-49ff-a27b-88f314f1bc51 506ddf0e-8cbf-44a9-93d7-130a550fd42d in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_external_dns_2205353a-e1d2-48ff-863b-9d6b1487d474 382cbbf2-8b08-4388-839a-b43b8bc82999 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_70aab480-3d6c-47d5-aaf9-8d2ddab2931c d1611973-b22d-4097-b6fd-0a480f2199a5 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_3fd06852-06cb-4d8a-b4b2-eb88ff5a6035 8b89525f-0764-4937-9fa8-022242647c0f in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_45ce130e-c5ac-4e26-ab73-7589ba634418 8144f1ee-dbef-45c1-9397-2031a9ca8b38 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_internal_dns_a7b7bfbe-0588-4781-9a5e-fba63584e5d2 3523db58-10ab-4e2c-95d9-fec43036c0f5 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_nexus_2bc0b0c4-63a9-44cc-afff-76ce645ef1d4 aa860565-2bbb-4a70-ae8e-7d8f29f1a536 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe 4d322f8a-d461-4ead-995b-9ba7d76becad in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_c2cbbf34-b852-4164-a572-01d4d79445a1 2aeeb036-0e23-419f-82f7-9ab04c06d7ec in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_9b135c74-c09a-4dcc-ba19-f6f8deae135a f4ea4a15-b3b3-4a67-b61a-8fba7c7ab61f in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 38fafd69-0275-4f4d-885d-3bbf2ea77551 in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cb8d5371-640d-4364-8a5e-0fde125d28af in service 100 
GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug ba445402-2775-4f97-87e9-640be3a00c6f in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 10676bfe-b61f-40e1-bc07-a4cb76ea1f30 install dataset in service fd00:1122:3344:101::25 + crucible 3d9b7487-d7b9-4e25-960f-f2086f3e2919 install dataset in service fd00:1122:3344:101::2e + crucible 4b19d194-8b25-4396-88da-3df1b3788601 install dataset in service fd00:1122:3344:101::2d + crucible c204730a-0946-4793-a470-64c88e89da96 install dataset in service fd00:1122:3344:101::2c + crucible_pantry 2de1c525-4e04-4ba6-ac6b-377aaa542f96 install dataset in service fd00:1122:3344:101::2b + crucible_pantry 31b26053-8b94-4d8c-9e4a-d7d720afe265 install dataset in service fd00:1122:3344:101::29 + crucible_pantry f87ada51-4419-4144-86a8-e5e4ff0f64d3 install dataset in service fd00:1122:3344:101::2a + external_dns 005548be-c5e4-49ff-a27b-88f314f1bc51 install dataset in service fd00:1122:3344:101::27 + external_dns 2205353a-e1d2-48ff-863b-9d6b1487d474 install dataset in service fd00:1122:3344:101::28 + external_dns 70aab480-3d6c-47d5-aaf9-8d2ddab2931c install dataset in service fd00:1122:3344:101::26 + internal_dns 3fd06852-06cb-4d8a-b4b2-eb88ff5a6035 install dataset in service fd00:1122:3344:2::1 + internal_dns 45ce130e-c5ac-4e26-ab73-7589ba634418 install dataset in service fd00:1122:3344:1::1 + internal_dns a7b7bfbe-0588-4781-9a5e-fba63584e5d2 install dataset in service fd00:1122:3344:3::1 + internal_ntp 9b135c74-c09a-4dcc-ba19-f6f8deae135a install dataset in service fd00:1122:3344:101::21 + nexus 2bc0b0c4-63a9-44cc-afff-76ce645ef1d4 install dataset in service fd00:1122:3344:101::24 + nexus 3c2cd97f-7b4a-4ff3-a9d5-6ce141fcdbbe install dataset in service fd00:1122:3344:101::23 + nexus c2cbbf34-b852-4164-a572-01d4d79445a1 install dataset in service fd00:1122:3344:101::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 2 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + unchanged names: 36 (records: 48) + +external DNS: + DNS zone: "oxide.example" (unchanged) + unchanged names: 5 (records: 9) + + + diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index b76a6b7fbdd..e319d9d891d 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -15,7 +15,6 @@ use crate::blueprint_editor::NoAvailableDnsSubnets; use crate::blueprint_editor::SledEditError; use crate::blueprint_editor::SledEditor; use crate::mgs_updates::PendingHostPhase2Changes; -use crate::planner::NoopConvertGlobalIneligibleReason; use crate::planner::NoopConvertInfo; use crate::planner::NoopConvertSledIneligibleReason; use crate::planner::ZoneExpungeReason; @@ -2739,9 +2738,6 @@ impl IdOrdItem for EnsureMupdateOverrideUpdatedZone { /// 
though inventory no longer has the sled. #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum BpMupdateOverrideNotClearedReason { - /// There is a global reason noop conversions are not possible. - NoopGlobalIneligible(NoopConvertGlobalIneligibleReason), - /// There is a sled-specific reason noop conversions are not possible. NoopSledIneligible(NoopConvertSledIneligibleReason), } @@ -2749,12 +2745,6 @@ pub(crate) enum BpMupdateOverrideNotClearedReason { impl fmt::Display for BpMupdateOverrideNotClearedReason { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - BpMupdateOverrideNotClearedReason::NoopGlobalIneligible(reason) => { - write!( - f, - "no sleds can be noop-converted to Artifact: {reason}", - ) - } BpMupdateOverrideNotClearedReason::NoopSledIneligible(reason) => { write!( f, diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs index a5e0ef4cae5..8d25fc4a136 100644 --- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs +++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs @@ -9,6 +9,7 @@ use crate::blueprint_builder::EditedSledScalarEdits; use crate::blueprint_builder::EnsureMupdateOverrideAction; use crate::blueprint_builder::EnsureMupdateOverrideUpdatedZone; use crate::blueprint_builder::SledEditCounts; +use crate::planner::NoopConvertGlobalIneligibleReason; use crate::planner::NoopConvertSledEligible; use crate::planner::NoopConvertSledIneligibleReason; use crate::planner::NoopConvertSledInfoMut; @@ -980,10 +981,15 @@ impl ActiveSledEditor { }) } }, - NoopConvertSledInfoMut::GlobalIneligible(reason) => { - Ok(EnsureMupdateOverrideAction::BpOverrideNotCleared { - bp_override, - reason: NoopGlobalIneligible(reason.clone()), + NoopConvertSledInfoMut::GlobalIneligible( + NoopConvertGlobalIneligibleReason::NoTargetRelease, + ) => { + // It's fine to clear the override when there's no + // target release, even though noop conversions aren't + // possible. + self.set_remove_mupdate_override(None); + Ok(EnsureMupdateOverrideAction::BpClearOverride { + prev_bp_override: bp_override, }) } } diff --git a/openapi/nexus-lockstep.json b/openapi/nexus-lockstep.json index a03852f4d5a..456dc63d1d3 100644 --- a/openapi/nexus-lockstep.json +++ b/openapi/nexus-lockstep.json @@ -5222,6 +5222,10 @@ "$ref": "#/components/schemas/PlanningAddSufficientZonesExist" } }, + "target_release_generation_is_one": { + "description": "Set to true if the target release generation is 1, which would allow zones to be added.", + "type": "boolean" + }, "waiting_on": { "nullable": true, "description": "What are we waiting on to start zone additions?", @@ -5243,7 +5247,8 @@ "sleds_waiting_for_ntp_zone", "sleds_without_ntp_zones_in_inventory", "sleds_without_zpools_for_ntp_zones", - "sufficient_zones_exist" + "sufficient_zones_exist", + "target_release_generation_is_one" ] }, "PlanningAddSufficientZonesExist": { From 095d7eca1aaf2749d48cd87e23bd637fe4a455c1 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Thu, 25 Sep 2025 09:01:56 -0400 Subject: [PATCH 18/18] Ignore build metadata when comparing system versions (#9072) See #9071 for context; this is the short/medium-term fix proposed in that issue. 
---
 nexus/src/external_api/http_entrypoints.rs | 63 +++++++++++++++++++++-
 1 file changed, 62 insertions(+), 1 deletion(-)

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 42e094ccc7b..a569203d8c4 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -6850,7 +6850,10 @@ impl NexusExternalApi for NexusExternalApiImpl {
             .await?
             .release_source
         {
-            if version > system_version {
+            if !is_new_target_release_version_allowed(
+                &version,
+                &system_version,
+            ) {
                 return Err(HttpError::for_bad_request(
                     None,
                     format!(
@@ -8814,3 +8817,61 @@ impl NexusExternalApi for NexusExternalApiImpl {
         .await
     }
 }
+
+fn is_new_target_release_version_allowed(
+    current_version: &semver::Version,
+    proposed_new_version: &semver::Version,
+) -> bool {
+    let mut current_version = current_version.clone();
+    let mut proposed_new_version = proposed_new_version.clone();
+
+    // Strip out the build metadata; this allows upgrading from one commit on
+    // the same major/minor/patch/prerelease to another. This isn't always right -
+    // we shouldn't allow downgrading to an earlier commit - but we don't have
+    // enough information in the version strings today to determine that. See
+    // .
+    current_version.build = semver::BuildMetadata::EMPTY;
+    proposed_new_version.build = semver::BuildMetadata::EMPTY;
+
+    proposed_new_version >= current_version
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_is_new_target_release_version_allowed() {
+        // Updating between versions that differ only in build metadata should
+        // be allowed in both directions.
+        let v1: semver::Version = "16.0.0-0.ci+git544f608e05a".parse().unwrap();
+        let v2: semver::Version = "16.0.0-0.ci+git8571be38c0b".parse().unwrap();
+        assert!(is_new_target_release_version_allowed(&v1, &v2));
+        assert!(is_new_target_release_version_allowed(&v2, &v1));
+
+        // Updating from a version to itself is always allowed. (This is
+        // important for clearing mupdate overrides.)
+        assert!(is_new_target_release_version_allowed(&v1, &v1));
+        assert!(is_new_target_release_version_allowed(&v2, &v2));
+
+        // We should be able to upgrade but not downgrade if the versions differ
+        // in major/minor/patch/prerelease.
+        for (v1, v2) in [
+            ("15.0.0-0.ci+git12345", "16.0.0-0.ci+git12345"),
+            ("16.0.0-0.ci+git12345", "16.1.0-0.ci+git12345"),
+            ("16.1.0-0.ci+git12345", "16.1.1-0.ci+git12345"),
+            ("16.1.1-0.ci+git12345", "16.1.1-1.ci+git12345"),
+        ] {
+            let v1: semver::Version = v1.parse().unwrap();
+            let v2: semver::Version = v2.parse().unwrap();
+            assert!(
+                is_new_target_release_version_allowed(&v1, &v2),
+                "should be allowed to upgrade from {v1} to {v2}"
+            );
+            assert!(
+                !is_new_target_release_version_allowed(&v2, &v1),
+                "should not be allowed to downgrade from {v2} to {v1}"
+            );
+        }
+    }
+}
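
Background note (not part of the patch series): the semver crate's Eq/Ord implementations take build metadata into account, which is why two CI builds of the same release can compare as different versions and why the helper above clears the build field before comparing. A minimal standalone sketch of that behavior, assuming a semver = "1" dependency:

// Standalone sketch; illustrates the semver-crate behavior the fix relies on.
use semver::{BuildMetadata, Version};

fn main() {
    // Two CI builds of the same release, differing only in build metadata.
    let a: Version = "16.0.0-0.ci+git544f608e05a".parse().unwrap();
    let b: Version = "16.0.0-0.ci+git8571be38c0b".parse().unwrap();

    // The crate's Eq/Ord include build metadata, so these are not equal and a
    // plain `>` check can reject moving from one build to the other depending
    // on how the build strings happen to order.
    assert_ne!(a, b);

    // Stripping the build metadata (as is_new_target_release_version_allowed
    // does) makes the two builds compare equal, so switching between them is
    // accepted in either direction.
    let strip = |v: &Version| {
        let mut v = v.clone();
        v.build = BuildMetadata::EMPTY;
        v
    };
    assert_eq!(strip(&a), strip(&b));
    assert!(strip(&b) >= strip(&a));
    println!("ok");
}

The comparison in the helper is `>=` rather than strict equality or `>` so that re-setting the current version remains allowed, which the test notes is important for clearing mupdate overrides.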