From 9421ac205455d77f61c8d7d562571cc1c41d3852 Mon Sep 17 00:00:00 2001
From: Rain
Date: Wed, 12 Nov 2025 21:56:54 +0000
Subject: [PATCH] [spr] initial version

Created using spr 1.3.6-beta.1
---
 Cargo.lock                                    |  20 +-
 Cargo.toml                                    |   3 -
 id-map/Cargo.toml                             |  20 -
 id-map/src/lib.rs                             | 647 ------------------
 nexus/db-model/Cargo.toml                     |   2 +
 nexus/db-model/src/typed_uuid.rs              |  44 ++
 nexus/db-queries/Cargo.toml                   |   1 -
 .../db-queries/src/db/datastore/inventory.rs  |  57 +-
 nexus/mgs-updates/Cargo.toml                  |   1 -
 nexus/mgs-updates/src/driver.rs               |  53 +-
 nexus/types/Cargo.toml                        |   1 -
 nexus/types/src/deployment.rs                 |  25 -
 sled-agent/Cargo.toml                         |   1 -
 sled-agent/src/rack_setup/plan/service.rs     | 280 ++++----
 sled-agent/src/rack_setup/service.rs          |   4 +-
 sled-agent/src/sim/server.rs                  | 180 ++---
 16 files changed, 357 insertions(+), 982 deletions(-)
 delete mode 100644 id-map/Cargo.toml
 delete mode 100644 id-map/src/lib.rs

diff --git a/Cargo.lock b/Cargo.lock
index 6846426403a..b95c4c7b6be 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4994,20 +4994,6 @@ dependencies = [
  "syn 2.0.106",
 ]
 
-[[package]]
-name = "id-map"
-version = "0.1.0"
-dependencies = [
- "daft",
- "derive-where",
- "omicron-workspace-hack",
- "proptest",
- "schemars 0.8.22",
- "serde",
- "serde_json",
- "test-strategy",
-]
-
 [[package]]
 name = "iddqd"
 version = "0.3.16"
@@ -6714,6 +6700,7 @@ dependencies = [
  "oxnet",
  "parse-display",
  "pq-sys",
+ "proptest",
  "rand 0.9.2",
  "ref-cast",
  "regex",
@@ -6726,6 +6713,7 @@ dependencies = [
  "slog-error-chain",
  "steno",
  "strum 0.27.2",
+ "test-strategy",
  "thiserror 2.0.17",
  "tokio",
  "tufaceous-artifact",
@@ -6759,7 +6747,6 @@ dependencies = [
  "gateway-types",
  "hex",
  "hyper-rustls",
- "id-map",
  "iddqd",
  "illumos-utils",
  "internal-dns-resolver",
@@ -7028,7 +7015,6 @@ dependencies = [
  "gateway-types",
  "http",
  "hubtools",
- "id-map",
  "iddqd",
  "internal-dns-resolver",
  "internal-dns-types",
@@ -7463,7 +7449,6 @@ dependencies = [
  "gateway-types",
  "http",
  "humantime",
- "id-map",
  "iddqd",
  "illumos-utils",
  "indent_write",
@@ -8749,7 +8734,6 @@ dependencies = [
  "http-range",
  "hyper",
  "hyper-staticfile",
- "id-map",
  "iddqd",
  "illumos-utils",
  "installinator-common",
diff --git a/Cargo.toml b/Cargo.toml
index f5d81cc4711..5d79ed0715a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -60,7 +60,6 @@ members = [
     "gateway-cli",
     "gateway-test-utils",
     "gateway-types",
-    "id-map",
     "illumos-utils",
     "installinator-api",
     "installinator-common",
@@ -223,7 +222,6 @@ default-members = [
     "gateway-cli",
     "gateway-test-utils",
     "gateway-types",
-    "id-map",
     "illumos-utils",
     "installinator-api",
     "installinator-common",
@@ -502,7 +500,6 @@ hyper-util = "0.1.16"
 hyper-rustls = "0.27.7"
 hyper-staticfile = "0.10.1"
 iddqd = { version = "0.3.16", features = ["daft", "serde", "schemars08"] }
-id-map = { path = "id-map" }
 illumos-utils = { path = "illumos-utils" }
 iana-time-zone = "0.1.63"
 indent_write = "2.2.0"
diff --git a/id-map/Cargo.toml b/id-map/Cargo.toml
deleted file mode 100644
index 77ad3d3e890..00000000000
--- a/id-map/Cargo.toml
+++ /dev/null
@@ -1,20 +0,0 @@
-[package]
-name = "id-map"
-version = "0.1.0"
-edition = "2021"
-license = "MPL-2.0"
-
-[dependencies]
-daft.workspace = true
-derive-where.workspace = true
-schemars.workspace = true
-serde.workspace = true
-omicron-workspace-hack.workspace = true
-
-[dev-dependencies]
-proptest.workspace = true
-serde_json.workspace = true
-test-strategy.workspace = true
-
-[lints]
-workspace = true
diff --git a/id-map/src/lib.rs b/id-map/src/lib.rs
deleted file mode 100644
index 20b37aa776a..00000000000
---
a/id-map/src/lib.rs +++ /dev/null @@ -1,647 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Provides [`IdMap`], a collection of values that are able to uniquely -//! identify themselves. - -use daft::BTreeMapDiff; -use daft::Diffable; -use derive_where::derive_where; -use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; -use serde::de::Error as _; -use serde::de::Visitor; -use std::borrow::Borrow; -use std::collections::BTreeMap; -use std::collections::btree_map; -use std::fmt; -use std::fmt::Debug; -use std::marker::PhantomData; -use std::ops::Deref; -use std::ops::DerefMut; - -/// Bounds required for a type to be stored in an [`IdMap`]. -pub trait IdMappable { - /// The identity type of this value. - type Id: Ord + fmt::Debug; - - /// Return an owned identity for this value. - /// - /// This method is called liberally by [`IdMap`]. For example, mutating a - /// value in an [`IdMap`] will call this method at least twice in service of - /// the runtime checks performed by [`RefMut`]. Getting owned `T::Id` values - /// is expected to be cheap. If your identity type is not `Copy`, it should - /// be cheap to `Clone`; e.g., `Arc` may be preferable to `String`. - fn id(&self) -> Self::Id; -} - -type Inner = BTreeMap<::Id, T>; - -/// A 1:1 map ordered by the entries' IDs. -/// -/// `IdMap` is a newtype wrapper around `BTreeMap` that does not allow a -/// mismatch between a key and its associated value (i.e., the keys are -/// guaranteed to be the ID of their associated value). Its implementations of -/// various serialization-related traits all erase the `IdMap` and behave like -/// the inner `BTreeMap` would, although deserialzation performs extra checks to -/// guarantee the key-must-be-its-values-ID property. -/// -/// `IdMap` has the same constraint as `BTreeMap` regarding mutating keys out -/// from under it: -/// -/// > It is a logic error for a key to be modified in such a way that the key’s -/// > ordering relative to any other key, as determined by the Ord trait, -/// > changes while it is in the map. This is normally only possible through -/// > Cell, RefCell, global state, I/O, or unsafe code. The behavior resulting -/// > from such a logic error is not specified, but will be encapsulated to the -/// > BTreeMap that observed the logic error and not result in undefined -/// > behavior. This could include panics, incorrect results, aborts, memory -/// > leaks, and non-termination. -/// -/// Additionally, `IdMap` has the requirement that when any _values_ are -/// mutated, their ID must not change. This is enforced at runtime via the -/// [`RefMut`] wrapper returned by `get_mut()` and `iter_mut()`. When the -/// wrapper is dropped, it will induce a panic if the ID has changed from what -/// the value had when it was retreived from the map. 
-#[derive_where(Default)] -#[derive_where(Clone, Debug, Eq, PartialEq; T, T::Id)] -pub struct IdMap { - inner: Inner, -} - -impl IdMap { - pub fn new() -> Self { - Self::default() - } - - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - pub fn len(&self) -> usize { - self.inner.len() - } - - pub fn insert(&mut self, entry: T) -> Option { - self.inner.insert(entry.id(), entry) - } - - pub fn remove(&mut self, key: &Q) -> Option - where - T::Id: Borrow, - Q: Ord + ?Sized, - { - self.inner.remove(key) - } - - pub fn first(&self) -> Option<&T> { - self.inner.first_key_value().map(|(_, val)| val) - } - - pub fn get(&self, key: &Q) -> Option<&T> - where - T::Id: Borrow, - Q: Ord + ?Sized, - { - self.inner.get(key) - } - - pub fn get_mut(&mut self, key: &Q) -> Option> - where - T::Id: Borrow, - Q: Ord + ?Sized, - { - self.inner.get_mut(key).map(RefMut::new) - } - - pub fn contains_key(&self, key: &Q) -> bool - where - T::Id: Borrow, - Q: Ord + ?Sized, - { - self.inner.contains_key(key) - } - - pub fn keys(&self) -> btree_map::Keys<'_, T::Id, T> { - self.inner.keys() - } - - pub fn iter(&self) -> btree_map::Values<'_, T::Id, T> { - self.inner.values() - } - - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - IterMut { inner: self.inner.values_mut() } - } - - pub fn into_iter(self) -> btree_map::IntoValues { - self.inner.into_values() - } - - pub fn clear(&mut self) { - self.inner.clear() - } - - pub fn retain FnMut(&'a mut RefMut<'b, T>) -> bool>( - &mut self, - mut f: F, - ) { - self.inner.retain(|_, val| f(&mut RefMut::new(val))) - } - - pub fn entry(&mut self, key: T::Id) -> Entry<'_, T> { - match self.inner.entry(key) { - btree_map::Entry::Vacant(slot) => { - Entry::Vacant(VacantEntry::new(slot)) - } - btree_map::Entry::Occupied(slot) => { - Entry::Occupied(OccupiedEntry::new(slot)) - } - } - } -} - -impl FromIterator for IdMap { - fn from_iter>(iter: I) -> Self { - let inner = iter.into_iter().map(|entry| (entry.id(), entry)).collect(); - Self { inner } - } -} - -impl IntoIterator for IdMap { - type Item = T; - type IntoIter = btree_map::IntoValues; - - fn into_iter(self) -> Self::IntoIter { - self.into_iter() - } -} - -impl<'a, T: IdMappable> IntoIterator for &'a IdMap { - type Item = &'a T; - type IntoIter = btree_map::Values<'a, T::Id, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, T: IdMappable> IntoIterator for &'a mut IdMap { - type Item = RefMut<'a, T>; - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl JsonSchema for IdMap -where - T: IdMappable + JsonSchema, - T::Id: JsonSchema, -{ - fn schema_name() -> String { - format!("IdMap{}", T::schema_name()) - } - - fn json_schema( - generator: &mut schemars::r#gen::SchemaGenerator, - ) -> schemars::schema::Schema { - Inner::::json_schema(generator) - } -} - -impl Serialize for IdMap -where - T: IdMappable + Serialize, - T::Id: Serialize, -{ - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.inner.serialize(serializer) - } -} - -impl<'de, T> Deserialize<'de> for IdMap -where - T: IdMappable + Deserialize<'de>, - T::Id: Deserialize<'de>, -{ - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct IdCheckVisitor(PhantomData); - - impl<'d, T> Visitor<'d> for IdCheckVisitor - where - T: IdMappable + Deserialize<'d>, - T::Id: Deserialize<'d>, - { - type Value = IdMap; - - fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("a map keyed by 
ID") - } - - fn visit_map(self, mut map: A) -> Result - where - A: serde::de::MapAccess<'d>, - { - let mut inner = Inner::::new(); - while let Some((key, value)) = map.next_entry::()? { - if key != value.id() { - return Err(A::Error::custom(format!( - "invalid map: key {:?} maps to value with ID {:?}", - key, - value.id() - ))); - } - if let Some(old) = inner.insert(key, value) { - return Err(A::Error::custom(format!( - "invalid map: duplicate key {:?}", - old.id() - ))); - } - } - Ok(IdMap { inner }) - } - } - - deserializer.deserialize_map(IdCheckVisitor(PhantomData)) - } -} - -impl Diffable for IdMap { - type Diff<'daft> - = BTreeMapDiff<'daft, T::Id, T> - where - T: 'daft; - - fn diff<'daft>(&'daft self, other: &'daft Self) -> Self::Diff<'daft> { - self.inner.diff(&other.inner) - } -} - -/// Wrapper around a `&'a mut T` that panics when dropped if the borrowed -/// value's `id()` has changed since the wrapper was created. -#[derive(Debug)] -pub struct RefMut<'a, T: IdMappable> { - original_id: T::Id, - // Always `Some(_)` until the `RefMut` is consumed by `into_ref()`. - borrowed: Option<&'a mut T>, -} - -impl Drop for RefMut<'_, T> { - fn drop(&mut self) { - if let Some(value) = self.borrowed.as_ref() { - assert_eq!( - self.original_id, - value.id(), - "IdMap values must not change their ID" - ); - } - } -} - -impl Deref for RefMut<'_, T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - self.borrowed.as_ref().unwrap() - } -} - -impl DerefMut for RefMut<'_, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - self.borrowed.as_mut().unwrap() - } -} - -impl<'a, T: IdMappable> RefMut<'a, T> { - fn new(borrowed: &'a mut T) -> Self { - Self { original_id: borrowed.id(), borrowed: Some(borrowed) } - } - - pub fn into_ref(mut self) -> &'a T { - let value = self.borrowed.take().unwrap(); - // We stole `value` so `Drop` won't be able to check this invariant; - // check it ourselves. - assert_eq!( - self.original_id, - value.id(), - "IdMap values must not change their ID" - ); - value - } -} - -pub struct IterMut<'a, T: IdMappable> { - inner: btree_map::ValuesMut<'a, T::Id, T>, -} - -impl<'a, T: IdMappable> Iterator for IterMut<'a, T> { - type Item = RefMut<'a, T>; - - fn next(&mut self) -> Option { - self.inner.next().map(RefMut::new) - } -} - -pub enum Entry<'a, T: IdMappable> { - Vacant(VacantEntry<'a, T>), - Occupied(OccupiedEntry<'a, T>), -} - -pub struct VacantEntry<'a, T: IdMappable> { - inner: btree_map::VacantEntry<'a, T::Id, T>, -} - -impl<'a, T: IdMappable> VacantEntry<'a, T> { - fn new(inner: btree_map::VacantEntry<'a, T::Id, T>) -> Self { - Self { inner } - } - - /// # Panics - /// - /// Panics if `value.id()` does not match the ID used to look up the parent - /// `Entry`. 
- pub fn insert(self, value: T) -> RefMut<'a, T> { - assert_eq!( - *self.key(), - value.id(), - "VacantEntry::insert() must insert a value with the same ID \ - used to create the entry" - ); - RefMut::new(self.inner.insert(value)) - } - - pub fn key(&self) -> &T::Id { - self.inner.key() - } -} - -pub struct OccupiedEntry<'a, T: IdMappable> { - inner: btree_map::OccupiedEntry<'a, T::Id, T>, -} - -impl<'a, T: IdMappable> OccupiedEntry<'a, T> { - fn new(inner: btree_map::OccupiedEntry<'a, T::Id, T>) -> Self { - Self { inner } - } - - pub fn get(&self) -> &T { - self.inner.get() - } - - pub fn get_mut(&mut self) -> RefMut<'_, T> { - RefMut::new(self.inner.get_mut()) - } - - pub fn into_mut(self) -> RefMut<'a, T> { - RefMut::new(self.inner.into_mut()) - } - - /// # Panics - /// - /// Panics if `value.id()` does not match the ID used to look up the parent - /// `Entry`. - pub fn insert(&mut self, value: T) -> T { - assert_eq!( - self.get().id(), - value.id(), - "VacantEntry::insert() must insert a value with the same ID \ - used to create the entry" - ); - self.inner.insert(value) - } - - pub fn remove(self) -> T { - self.inner.remove() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use daft::Diffable; - use test_strategy::Arbitrary; - use test_strategy::proptest; - - #[derive( - Debug, - Clone, - PartialEq, - Eq, - Arbitrary, - JsonSchema, - Serialize, - Deserialize, - Diffable, - )] - struct TestEntry { - id: u8, - val1: u32, - val2: u32, - } - - impl IdMappable for TestEntry { - type Id = u8; - - fn id(&self) -> Self::Id { - self.id - } - } - - #[proptest] - fn serialization_roundtrip(values: Vec) { - let map: IdMap<_> = values.into_iter().collect(); - - let serialized = serde_json::to_string(&map).unwrap(); - let deserialized: IdMap = - serde_json::from_str(&serialized).unwrap(); - - assert_eq!(map, deserialized); - } - - #[proptest] - fn serialization_is_transparent(values: Vec) { - let map: IdMap<_> = values.into_iter().collect(); - - let serialized = serde_json::to_string(&map).unwrap(); - let serialized_inner = serde_json::to_string(&map.inner).unwrap(); - - assert_eq!(serialized, serialized_inner); - } - - #[test] - fn deserialize_rejects_mismatched_keys() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - - let mut entries = map.inner; - entries.get_mut(&1).unwrap().id = 2; - - let serialized = serde_json::to_string(&entries).unwrap(); - let err = - serde_json::from_str::>(&serialized).unwrap_err(); - let err = err.to_string(); - - assert!( - err.contains("key 1 maps to value with ID 2"), - "unexpected error message: {err:?}" - ); - } - - #[test] - fn deserialize_rejects_duplicates() { - let serialized = r#" - { - "1": {"id": 1, "val1": 2, "val2": 3}, - "1": {"id": 1, "val1": 2, "val2": 3} - } - "#; - - let err = - serde_json::from_str::>(&serialized).unwrap_err(); - let err = err.to_string(); - - assert!( - err.contains("duplicate key 1"), - "unexpected error message: {err:?}" - ); - } - - #[test] - #[should_panic(expected = "IdMap values must not change their ID")] - fn get_mut_panics_if_id_changes() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - map.get_mut(&1).unwrap().id = 2; - } - - #[test] - #[should_panic(expected = "IdMap values must not change their ID")] - fn iter_mut_panics_if_id_changes() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - for mut entry in map.iter_mut() { - entry.id = 2; - } - } - - #[test] - #[should_panic(expected = "IdMap values 
must not change their ID")] - fn mut_into_iter_panics_if_id_changes() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - for mut entry in &mut map { - entry.id = 2; - } - } - - #[test] - #[should_panic(expected = "IdMap values must not change their ID")] - fn retain_panics_if_id_changes() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - map.retain(|entry| { - entry.id = 2; - true - }); - } - - #[test] - #[should_panic(expected = "must insert a value with the same ID")] - fn vacant_entry_panics_if_id_changes_on_insert() { - let mut map = IdMap::::new(); - match map.entry(0) { - Entry::Vacant(slot) => { - slot.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - } - Entry::Occupied(_) => unreachable!(), - } - } - - #[test] - #[should_panic(expected = "IdMap values must not change their ID")] - fn vacant_entry_panics_if_id_changed_after_insert() { - let mut map = IdMap::::new(); - match map.entry(1) { - Entry::Vacant(slot) => { - let mut val = - slot.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - val.id = 2; - } - Entry::Occupied(_) => unreachable!(), - } - } - - #[test] - #[should_panic(expected = "must insert a value with the same ID")] - fn occupied_entry_panics_if_id_changes_on_insert() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - match map.entry(1) { - Entry::Vacant(_) => unreachable!(), - Entry::Occupied(mut slot) => { - slot.insert(TestEntry { id: 2, val1: 2, val2: 3 }); - } - } - } - - #[test] - #[should_panic(expected = "IdMap values must not change their ID")] - fn occupied_entry_panics_if_id_changed_via_into_mut() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - match map.entry(1) { - Entry::Vacant(_) => unreachable!(), - Entry::Occupied(slot) => { - slot.into_mut().id = 2; - } - } - } - - #[test] - #[should_panic(expected = "IdMap values must not change their ID")] - fn occupied_entry_panics_if_id_changed_via_get_mut() { - let mut map = IdMap::::new(); - map.insert(TestEntry { id: 1, val1: 2, val2: 3 }); - match map.entry(1) { - Entry::Vacant(_) => unreachable!(), - Entry::Occupied(mut slot) => { - slot.get_mut().id = 2; - } - } - } - - #[test] - fn methods_allow_borrowed_ids() { - struct Foo { - id: String, - val: i32, - } - - impl IdMappable for Foo { - type Id = String; - - fn id(&self) -> Self::Id { - self.id.clone() - } - } - - let mut foos = IdMap::new(); - foos.insert(Foo { id: "foo".to_string(), val: 1 }); - - // Id type is `String`, but we can use `&str` for these methods - assert_eq!(foos.get("foo").unwrap().val, 1); - assert_eq!(foos.get_mut("foo").unwrap().val, 1); - assert!(foos.contains_key("foo")); - assert_eq!(foos.remove("foo").unwrap().val, 1); - } -} diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index 7965a6ff765..a10756677b6 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -62,3 +62,5 @@ tufaceous-artifact.workspace = true camino-tempfile.workspace = true expectorate.workspace = true regex.workspace = true +proptest.workspace = true +test-strategy.workspace = true diff --git a/nexus/db-model/src/typed_uuid.rs b/nexus/db-model/src/typed_uuid.rs index 1e54e242f34..b71722a55ee 100644 --- a/nexus/db-model/src/typed_uuid.rs +++ b/nexus/db-model/src/typed_uuid.rs @@ -9,6 +9,7 @@ use diesel::backend::Backend; use diesel::deserialize::{self, FromSql}; use diesel::serialize::{self, ToSql}; use diesel::sql_types; +use iddqd::{Comparable, Equivalent}; use 
omicron_uuid_kinds::{GenericUuid, TypedUuid, TypedUuidKind}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -115,3 +116,46 @@ impl GenericUuid for DbTypedUuid { self.0.as_untyped_uuid() } } + +impl Equivalent> for DbTypedUuid { + #[inline] + fn equivalent(&self, other: &TypedUuid) -> bool { + self.0.as_untyped_uuid() == other.as_untyped_uuid() + } +} + +impl Comparable> for DbTypedUuid { + #[inline] + fn compare(&self, key: &TypedUuid) -> std::cmp::Ordering { + self.0.as_untyped_uuid().cmp(key.as_untyped_uuid()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use omicron_uuid_kinds::SledUuid; + use std::hash::{BuildHasher, RandomState}; + use test_strategy::proptest; + + /// Test that the `Hash` implementation is consistent, as required by + /// `Equivalent`. + #[proptest] + fn test_hash_equality(id: SledUuid) { + let db_id = DbTypedUuid::from(id); + assert!(db_id.equivalent(&id)); + + let hasher = RandomState::new(); + let id_hash = hasher.hash_one(&id); + let db_id_hash = hasher.hash_one(&db_id); + assert_eq!(id_hash, db_id_hash); + } + + /// Test that the `compare` implementation is consistent, as required by + /// `Comparable`. + #[proptest] + fn test_compare_consistency(id1: SledUuid, id2: SledUuid) { + let db_id1 = DbTypedUuid::from(id1); + assert_eq!(db_id1.compare(&id2), id1.cmp(&id2)); + } +} diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index 93b12059e12..ddbaaaa2343 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -24,7 +24,6 @@ diesel-dtrace.workspace = true dropshot.workspace = true futures.workspace = true hex.workspace = true -id-map.workspace = true iddqd.workspace = true internal-dns-resolver.workspace = true internal-dns-types.workspace = true diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index e711c7f82d5..c79ac2d7228 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -26,8 +26,7 @@ use diesel::expression::SelectableHelper; use diesel::sql_types::Nullable; use futures::FutureExt; use futures::future::BoxFuture; -use id_map::{IdMap, IdMappable}; -use iddqd::IdOrdMap; +use iddqd::{IdOrdItem, IdOrdMap, id_upcast}; use nexus_db_errors::ErrorHandler; use nexus_db_errors::public_error_from_diesel; use nexus_db_errors::public_error_from_diesel_lookup; @@ -3122,7 +3121,7 @@ impl DataStore { let mut omicron_sled_configs = { use nexus_db_schema::schema::inv_omicron_sled_config::dsl; - let mut configs = IdMap::new(); + let mut configs = IdOrdMap::new(); let mut paginator = Paginator::new( batch_size, @@ -3143,19 +3142,27 @@ impl DataStore { })?; paginator = p.found_batch(&batch, &|row| row.id); for sled_config in batch { - configs.insert(OmicronSledConfigWithId { - id: sled_config.id.into(), - config: OmicronSledConfig { - generation: sled_config.generation.into(), - remove_mupdate_override: sled_config - .remove_mupdate_override - .map(From::from), - disks: IdOrdMap::default(), - datasets: IdOrdMap::default(), - zones: IdOrdMap::default(), - host_phase_2: sled_config.host_phase_2.into(), - }, - }); + configs + .insert_unique(OmicronSledConfigWithId { + id: sled_config.id.into(), + config: OmicronSledConfig { + generation: sled_config.generation.into(), + remove_mupdate_override: sled_config + .remove_mupdate_override + .map(From::from), + disks: IdOrdMap::default(), + datasets: IdOrdMap::default(), + zones: IdOrdMap::default(), + host_phase_2: sled_config.host_phase_2.into(), + 
}, + }) + .map_err(|e| { + Error::internal_error(&format!( + "duplicate omicron sled config ID found, but \ + database guarantees uniqueness: {}", + InlineErrorChain::new(&e), + )) + })?; } } @@ -3248,7 +3255,7 @@ impl DataStore { }) .transpose()?; let mut config_with_id = omicron_sled_configs - .get_mut(&z.sled_config_id.into()) + .get_mut(&z.sled_config_id) .ok_or_else(|| { // This error means that we found a row in // inv_omicron_sled_config_zone with no associated record in @@ -3302,7 +3309,7 @@ impl DataStore { for row in batch { let mut config_with_id = omicron_sled_configs - .get_mut(&row.sled_config_id.into()) + .get_mut(&row.sled_config_id) .ok_or_else(|| { Error::internal_error(&format!( "dataset config {:?}: unknown config ID: {:?}", @@ -3343,7 +3350,7 @@ impl DataStore { for row in batch { let mut config_with_id = omicron_sled_configs - .get_mut(&row.sled_config_id.into()) + .get_mut(&row.sled_config_id) .ok_or_else(|| { Error::internal_error(&format!( "disk config {:?}: unknown config ID: {:?}", @@ -3841,7 +3848,7 @@ impl DataStore { let ledgered_sled_config = s .ledgered_sled_config .map(|id| { - omicron_sled_configs.get(&id.into()).as_ref() + omicron_sled_configs.get(&id).as_ref() .map(|c| c.config.clone()) .ok_or_else(|| { Error::internal_error( @@ -3863,7 +3870,7 @@ impl DataStore { .remove(&sled_id) .map(|reconciler| { let last_reconciled_config = omicron_sled_configs - .get(&reconciler.last_reconciled_config.into()) + .get(&reconciler.last_reconciled_config) .as_ref() .ok_or_else(|| { Error::internal_error( @@ -4109,12 +4116,12 @@ struct OmicronSledConfigWithId { config: OmicronSledConfig, } -impl IdMappable for OmicronSledConfigWithId { - type Id = OmicronSledConfigUuid; - - fn id(&self) -> Self::Id { +impl IdOrdItem for OmicronSledConfigWithId { + type Key<'a> = OmicronSledConfigUuid; + fn key(&self) -> Self::Key<'_> { self.id } + id_upcast!(); } #[derive(Debug)] diff --git a/nexus/mgs-updates/Cargo.toml b/nexus/mgs-updates/Cargo.toml index 8a7f8a4258a..edc4043d868 100644 --- a/nexus/mgs-updates/Cargo.toml +++ b/nexus/mgs-updates/Cargo.toml @@ -13,7 +13,6 @@ gateway-client.workspace = true gateway-types.workspace = true gateway-messages.workspace = true iddqd.workspace = true -id-map.workspace = true internal-dns-resolver.workspace = true internal-dns-types.workspace = true nexus-sled-agent-shared.workspace = true diff --git a/nexus/mgs-updates/src/driver.rs b/nexus/mgs-updates/src/driver.rs index b00d04a410f..8e51402b43e 100644 --- a/nexus/mgs-updates/src/driver.rs +++ b/nexus/mgs-updates/src/driver.rs @@ -13,9 +13,9 @@ use crate::driver_update::apply_update; use futures::FutureExt; use futures::future::BoxFuture; use futures::stream::FuturesUnordered; -use id_map::IdMap; -use id_map::IdMappable; +use iddqd::IdOrdItem; use iddqd::IdOrdMap; +use iddqd::id_upcast; use nexus_types::deployment::PendingMgsUpdate; use nexus_types::deployment::PendingMgsUpdates; use nexus_types::internal_api::views::CompletedAttempt; @@ -98,10 +98,10 @@ pub struct MgsUpdateDriver { delayq: DelayQueue>, /// tracks update attempts that are in-progress right now - in_progress: IdMap, + in_progress: IdOrdMap, /// tracks update attempts that are not running right now /// (but waiting for a retry) - waiting: IdMap, + waiting: IdOrdMap, } impl MgsUpdateDriver { @@ -127,8 +127,8 @@ impl MgsUpdateDriver { status_tx, futures: FuturesUnordered::new(), delayq: DelayQueue::new(), - in_progress: IdMap::new(), - waiting: IdMap::new(), + in_progress: IdOrdMap::new(), + waiting: IdOrdMap::new(), } } @@ 
-251,7 +251,7 @@ impl MgsUpdateDriver { let to_dispatch: Vec<_> = new_config .iter() .filter_map(|new_request| { - let baseboard_id = &new_request.baseboard_id; + let baseboard_id = &*new_request.baseboard_id; let do_dispatch = if let Some(waiting) = self.waiting.get(baseboard_id) { *new_request != waiting.internal_request.request @@ -272,7 +272,7 @@ impl MgsUpdateDriver { for baseboard_id in &to_stop_waiting { // Update our bookkeeping. // unwrap(): we filtered on this condition above. - let waiting = self.waiting.remove(baseboard_id).unwrap(); + let waiting = self.waiting.remove(&**baseboard_id).unwrap(); // Stop tracking this timeout. self.delayq.remove(&waiting.delay_key); @@ -293,7 +293,7 @@ impl MgsUpdateDriver { fn start_attempt(&mut self, internal_request: InternalRequest) { let request = &internal_request.request; let baseboard_id = &request.baseboard_id; - assert!(!self.in_progress.contains_key(baseboard_id)); + assert!(!self.in_progress.contains_key(&**baseboard_id)); let update_id = SpUpdateUuid::new_v4(); let log = self.log.new(o!( @@ -355,7 +355,9 @@ impl MgsUpdateDriver { self.futures.push(future); // Update our bookkeeping. - assert!(self.in_progress.insert(in_progress).is_none()); + self.in_progress + .insert_unique(in_progress) + .expect("baseboard_id must not already be in progress"); } /// Invoked when an in-progress update attempt has completed. @@ -364,7 +366,7 @@ impl MgsUpdateDriver { // this attempt. let in_progress = self .in_progress - .remove(&result.baseboard_id) + .remove(&*result.baseboard_id) .expect("in-progress record for attempt that just completed"); let nattempts_done = in_progress.internal_request.nattempts_done + 1; let completed = CompletedAttempt { @@ -409,7 +411,8 @@ impl MgsUpdateDriver { let retry_timeout = self.retry_timeout; let status_time_next = chrono::Utc::now() + retry_timeout; let delay_key = self.delayq.insert(baseboard_id.clone(), retry_timeout); - self.waiting.insert(WaitingAttempt { delay_key, internal_request }); + self.waiting + .insert_overwrite(WaitingAttempt { delay_key, internal_request }); // Update the overall status to reflect all these changes. self.status_tx.send_modify(|driver_status| { @@ -443,7 +446,7 @@ impl MgsUpdateDriver { // Remove this request from the set of requests waiting to be retried. let waiting = self .waiting - .remove(&baseboard_id) + .remove(&*baseboard_id) .expect("waiting request for expired retry timer"); // Update the external status to reflect that. 
self.status_tx.send_modify(|driver_status| { @@ -486,12 +489,14 @@ impl MgsUpdateDriver { } /// information tracked for each request +#[derive(Debug)] struct InternalRequest { request: PendingMgsUpdate, nattempts_done: u32, } /// internal bookkeeping for each in-progress update +#[derive(Debug)] struct InProgressUpdate { log: slog::Logger, time_started: chrono::DateTime, @@ -499,11 +504,12 @@ struct InProgressUpdate { internal_request: InternalRequest, } -impl IdMappable for InProgressUpdate { - type Id = Arc; - fn id(&self) -> Self::Id { - self.internal_request.request.baseboard_id.clone() +impl IdOrdItem for InProgressUpdate { + type Key<'a> = &'a BaseboardId; + fn key(&self) -> Self::Key<'_> { + &self.internal_request.request.baseboard_id } + id_upcast!(); } /// internal result of a completed update attempt @@ -518,17 +524,12 @@ struct WaitingAttempt { internal_request: InternalRequest, } -impl WaitingAttempt { - fn baseboard_id(&self) -> &Arc { +impl IdOrdItem for WaitingAttempt { + type Key<'a> = &'a BaseboardId; + fn key(&self) -> Self::Key<'_> { &self.internal_request.request.baseboard_id } -} - -impl IdMappable for WaitingAttempt { - type Id = Arc; - fn id(&self) -> Self::Id { - self.baseboard_id().clone() - } + id_upcast!(); } /// Interface used by update attempts to update just their part of the overall diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 4eb40638b86..c414a17e375 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -24,7 +24,6 @@ futures.workspace = true http.workspace = true humantime.workspace = true iddqd.workspace = true -id-map.workspace = true illumos-utils.workspace = true indent_write.workspace = true ipnetwork.workspace = true diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 0c027cc550d..c0b1abab249 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -158,7 +158,6 @@ use blueprint_display::{ BpDiffState, BpOmicronZonesTableSchema, BpPhysicalDisksTableSchema, BpTable, BpTableData, BpTableRow, KvList, constants::*, }; -use id_map::IdMappable; use std::str::FromStr; /// Describes a complete set of software and configuration for the system @@ -1077,14 +1076,6 @@ pub struct BlueprintZoneConfig { pub image_source: BlueprintZoneImageSource, } -impl IdMappable for BlueprintZoneConfig { - type Id = OmicronZoneUuid; - - fn id(&self) -> Self::Id { - self.id - } -} - impl IdOrdItem for BlueprintZoneConfig { type Key<'a> = OmicronZoneUuid; @@ -2143,14 +2134,6 @@ pub struct BlueprintPhysicalDiskConfig { pub pool_id: ZpoolUuid, } -impl IdMappable for BlueprintPhysicalDiskConfig { - type Id = PhysicalDiskUuid; - - fn id(&self) -> Self::Id { - self.id - } -} - impl IdOrdItem for BlueprintPhysicalDiskConfig { type Key<'a> = PhysicalDiskUuid; fn key(&self) -> Self::Key<'_> { @@ -2169,14 +2152,6 @@ impl From for OmicronPhysicalDiskConfig { } } -impl IdMappable for BlueprintDatasetConfig { - type Id = DatasetUuid; - - fn id(&self) -> Self::Id { - self.id - } -} - impl IdOrdItem for BlueprintDatasetConfig { type Key<'a> = DatasetUuid; fn key(&self) -> Self::Key<'_> { diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index 0bbb9052409..40e2fdeb16d 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -44,7 +44,6 @@ http-body-util.workspace = true http-range.workspace = true hyper.workspace = true hyper-staticfile.workspace = true -id-map.workspace = true iddqd.workspace = true illumos-utils.workspace = true installinator-common.workspace = true diff --git 
a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 124eae7eb92..5bf25ccef36 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -4,7 +4,8 @@ //! Plan generation for "where should services be initialized". -use id_map::IdMap; +use iddqd::IdOrdMap; +use iddqd::errors::DuplicateItem; use illumos_utils::zpool::ZpoolName; use internal_dns_types::config::{ DnsConfigBuilder, DnsConfigParams, Host, Zone, @@ -97,19 +98,22 @@ pub enum PlanError { #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)] pub struct SledConfig { /// Control plane disks configured for this sled - pub disks: IdMap, + pub disks: IdOrdMap, /// Datasets configured for this sled pub datasets: BTreeMap, /// zones configured for this sled - pub zones: IdMap, + pub zones: IdOrdMap, } impl SledConfig { /// Adds a zone to the Sled's configuration, as well as any number of /// durable datasets. - pub fn add_zone_and_datasets(&mut self, zone: BlueprintZoneConfig) { + pub fn add_zone_and_datasets( + &mut self, + zone: BlueprintZoneConfig, + ) -> Result<(), DuplicateItem> { let fs_dataset_name = DatasetName::new( zone.filesystem_pool, DatasetKind::TransientZone { @@ -150,11 +154,7 @@ impl SledConfig { } // Add the zone. - // - // Currently this is pushing back to a Vec; we could inspect to - // ensure this function is idempotent, but it currently is not - // re-callable. - self.zones.insert(zone); + self.zones.insert_unique(zone).map_err(|e| e.into_owned()) } } @@ -444,23 +444,27 @@ impl Plan { sled.alloc_dataset_from_u2s(DatasetKind::InternalDns)?; let filesystem_pool = *dataset_name.pool(); - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - filesystem_pool, - zone_type: BlueprintZoneType::InternalDns( - blueprint_zone_type::InternalDns { - dataset: OmicronZoneDataset { - pool_name: *dataset_name.pool(), + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + filesystem_pool, + zone_type: BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { + dataset: OmicronZoneDataset { + pool_name: *dataset_name.pool(), + }, + http_address, + dns_address, + gz_address: dns_subnet.gz_address(), + gz_address_index: i + .try_into() + .expect("Giant indices?"), }, - http_address, - dns_address, - gz_address: dns_subnet.gz_address(), - gz_address_index: i.try_into().expect("Giant indices?"), - }, - ), - image_source: BlueprintZoneImageSource::InstallDataset, - }); + ), + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } // Provision CockroachDB zones, continuing to stripe across Sleds. 
@@ -480,20 +484,22 @@ impl Plan { let dataset_name = sled.alloc_dataset_from_u2s(DatasetKind::Cockroach)?; let filesystem_pool = *dataset_name.pool(); - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::CockroachDb( - blueprint_zone_type::CockroachDb { - address, - dataset: OmicronZoneDataset { - pool_name: *dataset_name.pool(), + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::CockroachDb( + blueprint_zone_type::CockroachDb { + address, + dataset: OmicronZoneDataset { + pool_name: *dataset_name.pool(), + }, }, - }, - ), - filesystem_pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + ), + filesystem_pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } // Provision external DNS zones, continuing to stripe across sleds. @@ -532,22 +538,24 @@ impl Plan { let dataset_name = sled.alloc_dataset_from_u2s(dataset_kind)?; let filesystem_pool = *dataset_name.pool(); - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::ExternalDns( - blueprint_zone_type::ExternalDns { - dataset: OmicronZoneDataset { - pool_name: *dataset_name.pool(), + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset: OmicronZoneDataset { + pool_name: *dataset_name.pool(), + }, + http_address, + dns_address, + nic, }, - http_address, - dns_address, - nic, - }, - ), - filesystem_pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + ), + filesystem_pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } // Provision Nexus zones, continuing to stripe across sleds. @@ -566,31 +574,35 @@ impl Plan { .unwrap(); let (nic, external_ip) = svc_port_builder.next_nexus(id)?; let filesystem_pool = sled.alloc_zpool_from_u2s()?; - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::Nexus( - blueprint_zone_type::Nexus { - internal_address, - lockstep_port: NEXUS_LOCKSTEP_PORT, - external_ip: from_ipaddr_to_external_floating_ip( - external_ip, - ), - nic, - // Tell Nexus to use TLS if and only if the caller - // provided TLS certificates. This effectively - // determines the status of TLS for the lifetime of - // the rack. In production-like deployments, we'd - // always expect TLS to be enabled. It's only in - // development that it might not be. - external_tls: !config.external_certificates.is_empty(), - external_dns_servers: config.dns_servers.clone(), - nexus_generation: Generation::new(), - }, - ), - filesystem_pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address, + lockstep_port: NEXUS_LOCKSTEP_PORT, + external_ip: from_ipaddr_to_external_floating_ip( + external_ip, + ), + nic, + // Tell Nexus to use TLS if and only if the caller + // provided TLS certificates. 
This effectively + // determines the status of TLS for the lifetime of + // the rack. In production-like deployments, we'd + // always expect TLS to be enabled. It's only in + // development that it might not be. + external_tls: !config + .external_certificates + .is_empty(), + external_dns_servers: config.dns_servers.clone(), + nexus_generation: Generation::new(), + }, + ), + filesystem_pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } // Provision Oximeter zones, continuing to stripe across sleds. @@ -613,15 +625,17 @@ impl Plan { .host_zone_with_one_backend(id, ServiceName::Oximeter, address) .unwrap(); let filesystem_pool = sled.alloc_zpool_from_u2s()?; - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::Oximeter( - blueprint_zone_type::Oximeter { address }, - ), - filesystem_pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }) + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::Oximeter( + blueprint_zone_type::Oximeter { address }, + ), + filesystem_pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } // Provision Clickhouse zones, continuing to stripe across sleds. @@ -648,20 +662,22 @@ impl Plan { let dataset_name = sled.alloc_dataset_from_u2s(DatasetKind::Clickhouse)?; let filesystem_pool = *dataset_name.pool(); - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::Clickhouse( - blueprint_zone_type::Clickhouse { - address: http_address, - dataset: OmicronZoneDataset { - pool_name: *dataset_name.pool(), + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::Clickhouse( + blueprint_zone_type::Clickhouse { + address: http_address, + dataset: OmicronZoneDataset { + pool_name: *dataset_name.pool(), + }, }, - }, - ), - filesystem_pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + ), + filesystem_pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } // Provision Crucible Pantry zones, continuing to stripe across sleds. @@ -684,15 +700,17 @@ impl Plan { address, ) .unwrap(); - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::CruciblePantry( - blueprint_zone_type::CruciblePantry { address }, - ), - filesystem_pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::CruciblePantry( + blueprint_zone_type::CruciblePantry { address }, + ), + filesystem_pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } // Provision a Crucible zone on every zpool on every Sled. 
@@ -711,18 +729,22 @@ impl Plan { ) .unwrap(); - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::Crucible( - blueprint_zone_type::Crucible { - address, - dataset: OmicronZoneDataset { pool_name: *pool }, - }, - ), - filesystem_pool: *pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::Crucible( + blueprint_zone_type::Crucible { + address, + dataset: OmicronZoneDataset { + pool_name: *pool, + }, + }, + ), + filesystem_pool: *pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } } @@ -771,13 +793,15 @@ impl Plan { .host_zone_with_one_backend(id, svcname, ntp_address) .unwrap(); - sled.request.add_zone_and_datasets(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type, - filesystem_pool, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + sled.request + .add_zone_and_datasets(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type, + filesystem_pool, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); } let services: HashMap<_, _> = sled_info diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index a82968753ca..bd49f99e9a5 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -1621,9 +1621,9 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( // value, we will need to revisit storing this in the serialized // RSS plan. 
sled_agent_generation: DeployStepVersion::V5_EVERYTHING, - disks: sled_config.disks.iter().cloned().collect(), + disks: sled_config.disks.clone(), datasets, - zones: sled_config.zones.iter().cloned().collect(), + zones: sled_config.zones.clone(), host_phase_2: BlueprintHostPhase2DesiredSlots::current_contents( ), remove_mupdate_override: None, diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index e90b916d0a1..9d0d79fc9ee 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -18,7 +18,7 @@ use crate::rack_setup::{ use crate::sim::SimulatedUpstairs; use anyhow::{Context as _, anyhow}; use crucible_agent_client::types::State as RegionState; -use id_map::IdMap; +use iddqd::IdOrdMap; use illumos_utils::zpool::ZpoolName; use internal_dns_types::config::DnsConfigBuilder; use internal_dns_types::names::DNS_ZONE_EXTERNAL_TESTING; @@ -408,26 +408,30 @@ pub async fn run_standalone_server( SocketAddr::V6(a) => a, }; let pool_name = ZpoolName::new_external(ZpoolUuid::new_v4()); - let mut zones = IdMap::new(); - zones.insert(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id: OmicronZoneUuid::new_v4(), - zone_type: BlueprintZoneType::InternalDns( - blueprint_zone_type::InternalDns { - dataset: OmicronZoneDataset { pool_name }, - http_address: http_bound, - dns_address: match dns.dns_server.local_address() { - SocketAddr::V4(_) => panic!("did not expect v4 address"), - SocketAddr::V6(a) => a, + let mut zones = IdOrdMap::new(); + zones + .insert_unique(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: OmicronZoneUuid::new_v4(), + zone_type: BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { + dataset: OmicronZoneDataset { pool_name }, + http_address: http_bound, + dns_address: match dns.dns_server.local_address() { + SocketAddr::V4(_) => { + panic!("did not expect v4 address") + } + SocketAddr::V6(a) => a, + }, + gz_address: Ipv6Addr::LOCALHOST, + gz_address_index: 0, }, - gz_address: Ipv6Addr::LOCALHOST, - gz_address_index: 0, - }, - ), - // Co-locate the filesystem pool with the dataset - filesystem_pool: pool_name, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + ), + // Co-locate the filesystem pool with the dataset + filesystem_pool: pool_name, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); let mut internal_services_ip_pool_ranges = vec![]; let mut macs = MacAddr::iter_system(); @@ -435,40 +439,46 @@ pub async fn run_standalone_server( let ip = nexus_external_addr.ip(); let id = OmicronZoneUuid::new_v4(); - zones.insert(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { - internal_address: match config.nexus_address { - SocketAddr::V4(_) => panic!("did not expect v4 address"), - SocketAddr::V6(a) => a, - }, - lockstep_port: nexus_lockstep_port, - external_ip: from_ipaddr_to_external_floating_ip(ip), - nic: nexus_types::inventory::NetworkInterface { - id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { - id: id.into_untyped_uuid(), + zones + .insert_unique(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: match config.nexus_address { + SocketAddr::V4(_) => { + panic!("did not expect v4 address") + } + SocketAddr::V6(a) => a, + }, + lockstep_port: nexus_lockstep_port, + 
external_ip: from_ipaddr_to_external_floating_ip(ip), + nic: nexus_types::inventory::NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: id.into_untyped_uuid(), + }, + name: "nexus".parse().unwrap(), + ip: NEXUS_OPTE_IPV4_SUBNET + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) + .unwrap() + .into(), + mac: macs.next().unwrap(), + subnet: (*NEXUS_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + transit_ips: vec![], + }, + external_tls: false, + external_dns_servers: vec![], + nexus_generation: Generation::new(), }, - name: "nexus".parse().unwrap(), - ip: NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) - .unwrap() - .into(), - mac: macs.next().unwrap(), - subnet: (*NEXUS_OPTE_IPV4_SUBNET).into(), - vni: Vni::SERVICES_VNI, - primary: true, - slot: 0, - transit_ips: vec![], - }, - external_tls: false, - external_dns_servers: vec![], - nexus_generation: Generation::new(), - }), - filesystem_pool: get_random_zpool(), - image_source: BlueprintZoneImageSource::InstallDataset, - }); + ), + filesystem_pool: get_random_zpool(), + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); internal_services_ip_pool_ranges.push(match ip { IpAddr::V4(addr) => { @@ -486,39 +496,41 @@ pub async fn run_standalone_server( let ip = *external_dns_internal_addr.ip(); let id = OmicronZoneUuid::new_v4(); let pool_name = ZpoolName::new_external(ZpoolUuid::new_v4()); - zones.insert(BlueprintZoneConfig { - disposition: BlueprintZoneDisposition::InService, - id, - zone_type: BlueprintZoneType::ExternalDns( - blueprint_zone_type::ExternalDns { - dataset: OmicronZoneDataset { pool_name }, - http_address: external_dns_internal_addr, - dns_address: from_sockaddr_to_external_floating_addr( - SocketAddr::V6(external_dns_internal_addr), - ), - nic: nexus_types::inventory::NetworkInterface { - id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { - id: id.into_untyped_uuid(), + zones + .insert_unique(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset: OmicronZoneDataset { pool_name }, + http_address: external_dns_internal_addr, + dns_address: from_sockaddr_to_external_floating_addr( + SocketAddr::V6(external_dns_internal_addr), + ), + nic: nexus_types::inventory::NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: id.into_untyped_uuid(), + }, + name: "external-dns".parse().unwrap(), + ip: DNS_OPTE_IPV4_SUBNET + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) + .unwrap() + .into(), + mac: macs.next().unwrap(), + subnet: (*DNS_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + transit_ips: vec![], }, - name: "external-dns".parse().unwrap(), - ip: DNS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) - .unwrap() - .into(), - mac: macs.next().unwrap(), - subnet: (*DNS_OPTE_IPV4_SUBNET).into(), - vni: Vni::SERVICES_VNI, - primary: true, - slot: 0, - transit_ips: vec![], }, - }, - ), - // Co-locate the filesystem pool with the dataset - filesystem_pool: pool_name, - image_source: BlueprintZoneImageSource::InstallDataset, - }); + ), + // Co-locate the filesystem pool with the dataset + filesystem_pool: pool_name, + image_source: BlueprintZoneImageSource::InstallDataset, + }) + .expect("freshly generated zone IDs are unique"); internal_services_ip_pool_ranges .push(IpRange::V6(Ipv6Range { first: 
ip, last: ip }));