From b7bcce32c9b70748752245bdde123e8406ee4032 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Fri, 24 Oct 2025 14:57:54 +0000 Subject: [PATCH 01/11] Local storage 2/4: Add the new disk type Add the ability to create LocalStorage type Disks. From the API this means that the DiskCreate struct has been changed into an enum, with variants for Crucible and LocalStorage. LocalStorage disks will be attached and detached just like Crucible disks, but will (currently) not support any of the other operations on a disk (snapshot, import, etc). Note that this is a breaking change to the Nexus external API - see nexus/types/src/external_api/params.rs for more on this. Note too that `content = "content"` was added to `InstanceDiskAttachment` because otherwise the type wouldn't pass the openapi lint step. A separate "local storage dataset allocation" row will be created during instance allocation in the next PR, slicing a portion of the zpool's local storage dataset. This will be delegated to the Propolis zone and used as file-backed block storage for an NVMe device. Using a zvol this way was measured to be both the fastest in terms of 4k IOPs and to have the least amount of standard deviation in those IOPs measurements, according to fio runs. 
--- common/src/api/external/mod.rs | 12 +- dev-tools/dropshot-apis/src/main.rs | 1 + dev-tools/omdb/src/bin/omdb/db.rs | 49 +++ end-to-end-tests/src/bin/bootstrap.rs | 2 +- end-to-end-tests/src/instance_launch.rs | 13 +- nexus/db-model/src/disk.rs | 8 +- nexus/db-model/src/disk_type_crucible.rs | 10 +- nexus/db-model/src/disk_type_local_storage.rs | 68 ++++ nexus/db-model/src/lib.rs | 4 + .../src/local_storage_dataset_allocation.rs | 58 +++ nexus/db-model/src/schema_versions.rs | 3 +- nexus/db-queries/src/db/datastore/disk.rs | 260 +++++++++++++- nexus/db-queries/src/db/datastore/mod.rs | 121 +++---- nexus/db-schema/src/schema.rs | 42 ++- nexus/src/app/disk.rs | 150 ++++++-- nexus/src/app/instance.rs | 38 ++ nexus/src/app/instance_platform/mod.rs | 13 +- nexus/src/app/sagas/disk_create.rs | 334 +++++++++++++----- nexus/src/app/sagas/disk_delete.rs | 5 + nexus/src/app/sagas/finalize_disk.rs | 6 + nexus/src/app/sagas/instance_create.rs | 2 +- .../src/app/sagas/region_replacement_start.rs | 15 +- .../region_snapshot_replacement_start.rs | 15 +- nexus/src/app/sagas/snapshot_create.rs | 31 +- nexus/src/app/snapshot.rs | 6 + nexus/test-utils/src/resource_helpers.rs | 29 +- .../crucible_replacements.rs | 60 +++- nexus/tests/integration_tests/disks.rs | 50 +-- nexus/tests/integration_tests/endpoints.rs | 4 +- nexus/tests/integration_tests/images.rs | 9 +- nexus/tests/integration_tests/instances.rs | 53 +-- .../tests/integration_tests/local_storage.rs | 165 +++++++++ nexus/tests/integration_tests/mod.rs | 1 + nexus/tests/integration_tests/pantry.rs | 16 +- nexus/tests/integration_tests/quotas.rs | 2 +- nexus/tests/integration_tests/snapshots.rs | 31 +- nexus/tests/integration_tests/utilization.rs | 2 +- .../integration_tests/volume_management.rs | 91 +++-- nexus/types/src/external_api/params.rs | 52 ++- openapi/nexus.json | 152 ++++---- schema/crdb/dbinit.sql | 33 +- schema/crdb/local-storage-disk-type/up01.sql | 6 + schema/crdb/local-storage-disk-type/up02.sql | 7 + 
schema/crdb/local-storage-disk-type/up03.sql | 12 + 44 files changed, 1611 insertions(+), 430 deletions(-) create mode 100644 nexus/db-model/src/disk_type_local_storage.rs create mode 100644 nexus/db-model/src/local_storage_dataset_allocation.rs create mode 100644 nexus/tests/integration_tests/local_storage.rs create mode 100644 schema/crdb/local-storage-disk-type/up01.sql create mode 100644 schema/crdb/local-storage-disk-type/up02.sql create mode 100644 schema/crdb/local-storage-disk-type/up03.sql diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index a0101fd7e84..58639ecd1fc 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -593,7 +593,16 @@ impl Display for ByteCount { } // TODO-cleanup This could use the experimental std::num::IntErrorKind. -#[derive(Debug, Eq, thiserror::Error, Ord, PartialEq, PartialOrd)] +#[derive( + Debug, + Eq, + thiserror::Error, + Ord, + PartialEq, + PartialOrd, + Serialize, + Deserialize, +)] pub enum ByteCountRangeError { #[error("value is too small for a byte count")] TooSmall, @@ -1425,6 +1434,7 @@ impl SimpleIdentityOrName for AntiAffinityGroupMember { #[serde(rename_all = "snake_case")] pub enum DiskType { Crucible, + LocalStorage, } /// View of a Disk diff --git a/dev-tools/dropshot-apis/src/main.rs b/dev-tools/dropshot-apis/src/main.rs index 56ec9253fdc..176f9e3ce74 100644 --- a/dev-tools/dropshot-apis/src/main.rs +++ b/dev-tools/dropshot-apis/src/main.rs @@ -315,6 +315,7 @@ fn all_apis() -> anyhow::Result { let apis = ManagedApis::new(apis) .context("error creating ManagedApis")? 
.with_validation(validate); + Ok(apis) } diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index d3ee0247503..64ae3d4d5c9 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -120,6 +120,7 @@ use nexus_db_queries::db::datastore::CrucibleTargets; use nexus_db_queries::db::datastore::Disk; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::datastore::InstanceStateComputer; +use nexus_db_queries::db::datastore::LocalStorageDisk; use nexus_db_queries::db::datastore::SQL_BATCH_SIZE; use nexus_db_queries::db::datastore::VolumeCookedResult; use nexus_db_queries::db::datastore::read_only_resources_associated_with_volume; @@ -152,6 +153,7 @@ use omicron_common::api::external::MacAddr; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::DownstairsRegionUuid; +use omicron_uuid_kinds::ExternalZpoolUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::ParseError; @@ -2277,6 +2279,52 @@ async fn crucible_disk_info( Ok(()) } +async fn local_storage_disk_info( + disk: LocalStorageDisk, +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct Row { + disk_name: String, + + time_created: DateTime, + #[tabled(display_with = "display_option_blank")] + time_deleted: Option>, + + dataset_id: DatasetUuid, + pool_id: ExternalZpoolUuid, + sled_id: SledUuid, + + dataset_size: u64, + } + + if let Some(allocation) = &disk.local_storage_dataset_allocation { + let rows = vec![Row { + disk_name: disk.name().to_string(), + + time_created: allocation.time_created, + time_deleted: allocation.time_deleted, + + dataset_id: allocation.local_storage_dataset_id(), + pool_id: allocation.pool_id(), + sled_id: allocation.sled_id(), + + dataset_size: allocation.dataset_size.to_bytes(), + }]; + + let table = tabled::Table::new(rows) + 
.with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + } else { + println!("no allocation yet"); + } + + Ok(()) +} + /// Run `omdb db disk info `. async fn cmd_db_disk_info( opctx: &OpContext, @@ -2287,6 +2335,7 @@ async fn cmd_db_disk_info( Disk::Crucible(disk) => { crucible_disk_info(opctx, datastore, disk).await } + Disk::LocalStorage(disk) => local_storage_disk_info(disk).await, } } diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs index 5aa9cf22f7f..df7c1e34c86 100644 --- a/end-to-end-tests/src/bin/bootstrap.rs +++ b/end-to-end-tests/src/bin/bootstrap.rs @@ -95,7 +95,7 @@ async fn run_test() -> Result<()> { ctx.client .disk_create() .project(ctx.project_name.clone()) - .body(DiskCreate { + .body(DiskCreate::Crucible { name: disk_name.clone(), description: String::new(), disk_source: DiskSource::Blank { diff --git a/end-to-end-tests/src/instance_launch.rs b/end-to-end-tests/src/instance_launch.rs index 02b02f19a44..99179cc3219 100644 --- a/end-to-end-tests/src/instance_launch.rs +++ b/end-to-end-tests/src/instance_launch.rs @@ -6,8 +6,9 @@ use async_trait::async_trait; use omicron_test_utils::dev::poll::{CondCheckError, wait_for_condition}; use oxide_client::types::{ ByteCount, DiskCreate, DiskSource, ExternalIp, ExternalIpCreate, - InstanceCpuCount, InstanceCreate, InstanceDiskAttachment, - InstanceNetworkInterfaceAttachment, InstanceState, SshKeyCreate, + InstanceCpuCount, InstanceCreate, InstanceDiskAttach, + InstanceDiskAttachment, InstanceNetworkInterfaceAttachment, InstanceState, + SshKeyCreate, }; use oxide_client::{ClientCurrentUserExt, ClientDisksExt, ClientInstancesExt}; use russh::{ChannelMsg, Disconnect}; @@ -42,7 +43,7 @@ async fn instance_launch() -> Result<()> { .client .disk_create() .project(ctx.project_name.clone()) - .body(DiskCreate { + .body(DiskCreate::Crucible { name: disk_name.clone(), description: String::new(), 
disk_source: DiskSource::Image { @@ -66,9 +67,9 @@ async fn instance_launch() -> Result<()> { hostname: "localshark".parse().unwrap(), // 🦈 memory: ByteCount(1024 * 1024 * 1024), ncpus: InstanceCpuCount(2), - boot_disk: Some(InstanceDiskAttachment::Attach { - name: disk_name.clone(), - }), + boot_disk: Some(InstanceDiskAttachment::Attach( + InstanceDiskAttach { name: disk_name.clone() }, + )), disks: Vec::new(), network_interfaces: InstanceNetworkInterfaceAttachment::Default, external_ips: vec![ExternalIpCreate::Ephemeral { pool: None }], diff --git a/nexus/db-model/src/disk.rs b/nexus/db-model/src/disk.rs index c2222ca1f3c..5572f8353ff 100644 --- a/nexus/db-model/src/disk.rs +++ b/nexus/db-model/src/disk.rs @@ -26,6 +26,7 @@ impl_enum_type!( // Enum values Crucible => b"crucible" + LocalStorage => b"local_storage" ); /// A Disk, where how the blocks are stored depend on the disk_type. @@ -75,7 +76,8 @@ pub struct Disk { /// (where rows are matched based on the disk_id field in that table) and /// combined into a higher level `datastore::Disk` enum. /// - /// For `Crucible` disks, see the DiskTypeCrucible model. + /// For `Crucible` disks, see the DiskTypeCrucible model. For `LocalStorage` + /// disks, see the DiskTypeLocalStorage model. 
pub disk_type: DiskType, } @@ -88,7 +90,7 @@ impl Disk { runtime_initial: DiskRuntimeState, disk_type: DiskType, ) -> Self { - let identity = DiskIdentity::new(disk_id, params.identity.clone()); + let identity = DiskIdentity::new(disk_id, params.identity().clone()); Self { identity, @@ -96,7 +98,7 @@ impl Disk { project_id, runtime_state: runtime_initial, slot: None, - size: params.size.into(), + size: params.size().into(), block_size, disk_type, } diff --git a/nexus/db-model/src/disk_type_crucible.rs b/nexus/db-model/src/disk_type_crucible.rs index 600a1002c8e..1964fb269d1 100644 --- a/nexus/db-model/src/disk_type_crucible.rs +++ b/nexus/db-model/src/disk_type_crucible.rs @@ -43,16 +43,16 @@ impl DiskTypeCrucible { pub fn new( disk_id: Uuid, volume_id: VolumeUuid, - params: ¶ms::DiskCreate, + disk_source: ¶ms::DiskSource, ) -> Self { - let create_snapshot_id = match params.disk_source { - params::DiskSource::Snapshot { snapshot_id } => Some(snapshot_id), + let create_snapshot_id = match disk_source { + params::DiskSource::Snapshot { snapshot_id } => Some(*snapshot_id), _ => None, }; // XXX further enum here for different image types? - let create_image_id = match params.disk_source { - params::DiskSource::Image { image_id } => Some(image_id), + let create_image_id = match disk_source { + params::DiskSource::Image { image_id } => Some(*image_id), _ => None, }; diff --git a/nexus/db-model/src/disk_type_local_storage.rs b/nexus/db-model/src/disk_type_local_storage.rs new file mode 100644 index 00000000000..c5917084f05 --- /dev/null +++ b/nexus/db-model/src/disk_type_local_storage.rs @@ -0,0 +1,68 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use crate::ByteCount; +use crate::typed_uuid::DbTypedUuid; +use nexus_db_schema::schema::disk_type_local_storage; +use omicron_common::api::external; +use omicron_uuid_kinds::DatasetKind; +use omicron_uuid_kinds::DatasetUuid; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +/// A Disk can be backed using a zvol slice from the local storage dataset +/// present on each zpool of a sled. +#[derive( + Queryable, Insertable, Clone, Debug, Selectable, Serialize, Deserialize, +)] +#[diesel(table_name = disk_type_local_storage)] +pub struct DiskTypeLocalStorage { + disk_id: Uuid, + + /// For zvols inside a parent dataset, there's an overhead that must be + /// accounted for when setting a quota and reservation on that parent + /// dataset. Record at model creation time how much overhead is required for + /// the parent `local_storage` dataset slice in order to fit the child + /// volume. + required_dataset_overhead: ByteCount, + + local_storage_dataset_allocation_id: Option>, +} + +impl DiskTypeLocalStorage { + /// Creates a new `DiskTypeLocalStorage`. Returns Err if the computed + /// required dataset overhead does not fit in a `ByteCount`. + pub fn new( + disk_id: Uuid, + size: external::ByteCount, + ) -> Result { + // For zvols, there's an overhead that must be accounted for, and it + // empirically seems to be about 65M per 1G for volblocksize=4096. + // Multiple the disk size by something a little over this value. + + let one_gb = external::ByteCount::from_gibibytes_u32(1).to_bytes(); + let gbs = size.to_bytes() / one_gb; + let overhead: u64 = + external::ByteCount::from_mebibytes_u32(70).to_bytes() * gbs; + + // Don't unwrap this - the size of this disk is a parameter set by an + // API call, and we don't want to panic on out of range input. 
+ let required_dataset_overhead = + external::ByteCount::try_from(overhead)?; + + Ok(DiskTypeLocalStorage { + disk_id, + required_dataset_overhead: required_dataset_overhead.into(), + local_storage_dataset_allocation_id: None, + }) + } + + pub fn required_dataset_overhead(&self) -> external::ByteCount { + self.required_dataset_overhead.into() + } + + pub fn local_storage_dataset_allocation_id(&self) -> Option { + self.local_storage_dataset_allocation_id.map(Into::into) + } +} diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 6b81d296479..022f51f8dbb 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -36,6 +36,7 @@ mod digest; mod disk; mod disk_state; mod disk_type_crucible; +mod disk_type_local_storage; mod dns; mod downstairs; mod external_ip; @@ -58,6 +59,7 @@ pub mod ipv6; mod ipv6net; mod l4_port_range; mod local_storage; +mod local_storage_dataset_allocation; mod macaddr; mod migration; mod migration_state; @@ -182,6 +184,7 @@ pub use digest::*; pub use disk::*; pub use disk_state::*; pub use disk_type_crucible::*; +pub use disk_type_local_storage::*; pub use dns::*; pub use downstairs::*; pub use ereport::*; @@ -206,6 +209,7 @@ pub use ipv6::*; pub use ipv6net::*; pub use l4_port_range::*; pub use local_storage::*; +pub use local_storage_dataset_allocation::*; pub use migration::*; pub use migration_state::*; pub use multicast_group::*; diff --git a/nexus/db-model/src/local_storage_dataset_allocation.rs b/nexus/db-model/src/local_storage_dataset_allocation.rs new file mode 100644 index 00000000000..3b7741d42b4 --- /dev/null +++ b/nexus/db-model/src/local_storage_dataset_allocation.rs @@ -0,0 +1,58 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use super::DbTypedUuid; +use crate::ByteCount; +use chrono::DateTime; +use chrono::Utc; +use nexus_db_schema::schema::local_storage_dataset_allocation; +use omicron_uuid_kinds::DatasetKind; +use omicron_uuid_kinds::DatasetUuid; +use omicron_uuid_kinds::ExternalZpoolKind; +use omicron_uuid_kinds::ExternalZpoolUuid; +use omicron_uuid_kinds::SledKind; +use omicron_uuid_kinds::SledUuid; +use serde::Deserialize; +use serde::Serialize; + +/// A slice of a the local storage dataset present on each zpool, allocated for +/// use with a local storage type Disk. This is a child dataset of that local +/// storage dataset, itself containing a child zvol that will be delegated to a +/// Propolis zone. +#[derive( + Queryable, Insertable, Debug, Clone, Selectable, Serialize, Deserialize, +)] +#[diesel(table_name = local_storage_dataset_allocation)] +pub struct LocalStorageDatasetAllocation { + id: DbTypedUuid, + + pub time_created: DateTime, + pub time_deleted: Option>, + + local_storage_dataset_id: DbTypedUuid, + pool_id: DbTypedUuid, + sled_id: DbTypedUuid, + + /// Size of this dataset, which is enough to contain the child zvol plus + /// some overhead. + pub dataset_size: ByteCount, +} + +impl LocalStorageDatasetAllocation { + pub fn id(&self) -> DatasetUuid { + self.id.into() + } + + pub fn local_storage_dataset_id(&self) -> DatasetUuid { + self.local_storage_dataset_id.into() + } + + pub fn pool_id(&self) -> ExternalZpoolUuid { + self.pool_id.into() + } + + pub fn sled_id(&self) -> SledUuid { + self.sled_id.into() + } +} diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 1ee9a2a936a..bd8207c8843 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. 
-pub const SCHEMA_VERSION: Version = Version::new(209, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(210, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(210, "local-storage-disk-type"), KnownVersion::new(209, "multicast-group-support"), KnownVersion::new(208, "disable-tuf-repo-pruner"), KnownVersion::new(207, "disk-types"), diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index ba4d8d254c1..fcd43ee5f06 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -22,7 +22,9 @@ use crate::db::model::DiskRuntimeState; use crate::db::model::DiskState; use crate::db::model::DiskTypeCrucible; use crate::db::model::DiskTypeCrucibleUpdate; +use crate::db::model::DiskTypeLocalStorage; use crate::db::model::Instance; +use crate::db::model::LocalStorageDatasetAllocation; use crate::db::model::Name; use crate::db::model::Project; use crate::db::model::VirtualProvisioningResource; @@ -41,6 +43,7 @@ use nexus_db_errors::OptionalError; use nexus_db_errors::public_error_from_diesel; use nexus_db_lookup::LookupPath; use omicron_common::api; +use omicron_common::api::external; use omicron_common::api::external::CreateResult; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; @@ -49,6 +52,7 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_common::zpool_name::ZpoolName; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::VolumeUuid; use ref_cast::RefCast; @@ -61,12 +65,15 @@ use uuid::Uuid; 
#[derive(Debug, Clone, Serialize, Deserialize)] pub enum Disk { Crucible(CrucibleDisk), + + LocalStorage(LocalStorageDisk), } impl Disk { pub fn model(&self) -> &model::Disk { match &self { Disk::Crucible(disk) => disk.model(), + Disk::LocalStorage(disk) => disk.model(), } } @@ -159,6 +166,95 @@ impl CrucibleDisk { } } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LocalStorageDisk { + pub disk: model::Disk, + pub disk_type_local_storage: DiskTypeLocalStorage, + pub local_storage_dataset_allocation: Option, +} + +impl LocalStorageDisk { + /// Create a new unallocated LocalStorageDisk + pub fn new( + disk: model::Disk, + disk_type_local_storage: DiskTypeLocalStorage, + ) -> LocalStorageDisk { + LocalStorageDisk { + disk, + disk_type_local_storage, + local_storage_dataset_allocation: None, + } + } + + pub fn model(&self) -> &model::Disk { + &self.disk + } + + pub fn id(&self) -> Uuid { + self.model().id() + } + + pub fn name(&self) -> &api::external::Name { + self.model().name() + } + + pub fn time_deleted(&self) -> Option> { + self.model().time_deleted() + } + + pub fn project_id(&self) -> Uuid { + self.model().project_id + } + + pub fn runtime(&self) -> DiskRuntimeState { + self.model().runtime() + } + + pub fn state(&self) -> DiskState { + self.model().state() + } + + pub fn size(&self) -> model::ByteCount { + self.model().size + } + + pub fn slot(&self) -> Option { + self.model().slot() + } + + pub fn required_dataset_overhead(&self) -> external::ByteCount { + self.disk_type_local_storage.required_dataset_overhead() + } + + /// Return the full path to the local storage zvol's device + pub fn zvol_path(&self) -> Result { + let Some(local_storage_dataset_allocation) = + &self.local_storage_dataset_allocation + else { + return Err(Error::internal_error(&format!( + "LocalStorageDisk {} not allocated!", + self.id(), + ))); + }; + + let pool_id = local_storage_dataset_allocation.pool_id(); + let dataset_id = local_storage_dataset_allocation.id(); + + let 
zpool_name = ZpoolName::External(pool_id); + + let path = [ + // Each zvol's path to the device starts with this + String::from("/dev/zvol/rdsk"), + // All local storage datasets have the same path template, and will + // all be called "vol" for now. + format!("{zpool_name}/crypt/local_storage/{dataset_id}/vol"), + ] + .join("/"); + + Ok(path) + } +} + /// Conversion to the external API type. impl Into for Disk { fn into(self) -> api::external::Disk { @@ -178,6 +274,26 @@ impl Into for Disk { disk_type: api::external::DiskType::Crucible, } } + + Disk::LocalStorage(LocalStorageDisk { + disk, + disk_type_local_storage: _, + local_storage_dataset_allocation: _, + }) => { + // XXX can we remove this? + let device_path = format!("/mnt/{}", disk.name().as_str()); + api::external::Disk { + identity: disk.identity(), + project_id: disk.project_id, + snapshot_id: None, + image_id: None, + size: disk.size.into(), + block_size: disk.block_size.into(), + state: disk.state().into(), + device_path, + disk_type: api::external::DiskType::LocalStorage, + } + } } } } @@ -191,10 +307,10 @@ impl DataStore { let (.., disk) = LookupPath::new(opctx, self).disk_id(disk_id).fetch().await?; + let conn = self.pool_connection_authorized(opctx).await?; + let disk = match disk.disk_type { db::model::DiskType::Crucible => { - let conn = self.pool_connection_authorized(opctx).await?; - use nexus_db_schema::schema::disk_type_crucible::dsl; let disk_type_crucible = dsl::disk_type_crucible @@ -208,6 +324,50 @@ impl DataStore { Disk::Crucible(CrucibleDisk { disk, disk_type_crucible }) } + + db::model::DiskType::LocalStorage => { + use nexus_db_schema::schema::disk_type_local_storage::dsl; + + let disk_type_local_storage = dsl::disk_type_local_storage + .filter(dsl::disk_id.eq(disk_id)) + .select(DiskTypeLocalStorage::as_select()) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + let local_storage_dataset_allocation = if let Some( + 
allocation_id, + ) = + disk_type_local_storage + .local_storage_dataset_allocation_id() + { + use nexus_db_schema::schema::local_storage_dataset_allocation::dsl; + + Some( + dsl::local_storage_dataset_allocation + .filter(dsl::id.eq(to_db_typed_uuid(allocation_id))) + .select(LocalStorageDatasetAllocation::as_select()) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + })?, + ) + } else { + None + }; + + Disk::LocalStorage(LocalStorageDisk { + disk, + disk_type_local_storage, + local_storage_dataset_allocation, + }) + } }; Ok(disk) @@ -268,9 +428,12 @@ impl DataStore { ) -> ListResultVec { use nexus_db_schema::schema::disk::dsl; use nexus_db_schema::schema::disk_type_crucible::dsl as disk_type_crucible_dsl; + use nexus_db_schema::schema::disk_type_local_storage::dsl as disk_type_local_storage_dsl; opctx.authorize(authz::Action::ListChildren, authz_instance).await?; + let conn = self.pool_connection_authorized(opctx).await?; + let results = match pagparams { PaginatedBy::Id(pagparams) => { paginated(dsl::disk, dsl::id, &pagparams) @@ -285,13 +448,18 @@ impl DataStore { disk_type_crucible_dsl::disk_type_crucible .on(dsl::id.eq(disk_type_crucible_dsl::disk_id)), ) + .left_join( + disk_type_local_storage_dsl::disk_type_local_storage + .on(dsl::id.eq(disk_type_local_storage_dsl::disk_id)), + ) .filter(dsl::time_deleted.is_null()) .filter(dsl::attach_instance_id.eq(authz_instance.id())) .select(( model::Disk::as_select(), Option::::as_select(), + Option::::as_select(), )) - .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
+ .get_results_async(&*conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; @@ -299,14 +467,51 @@ impl DataStore { for result in results { match result { - (disk, Some(disk_type_crucible)) => { + (disk, Some(disk_type_crucible), None) => { list.push(Disk::Crucible(CrucibleDisk { disk, disk_type_crucible, })); } - (disk, None) => { + (disk, None, Some(disk_type_local_storage)) => { + let local_storage_dataset_allocation = if let Some( + allocation_id, + ) = + disk_type_local_storage + .local_storage_dataset_allocation_id() + { + use nexus_db_schema::schema::local_storage_dataset_allocation::dsl; + + Some( + dsl::local_storage_dataset_allocation + .filter( + dsl::id.eq(to_db_typed_uuid(allocation_id)), + ) + .select( + LocalStorageDatasetAllocation::as_select(), + ) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + })?, + ) + } else { + None + }; + + list.push(Disk::LocalStorage(LocalStorageDisk { + disk, + disk_type_local_storage, + local_storage_dataset_allocation, + })); + } + + (disk, _, _) => { // The above paginated query attempts to get all types of // disk in one query, instead of matching on the disk type // of each returned disk row and doing additional queries. 
@@ -334,7 +539,6 @@ impl DataStore { disk: Disk, ) -> Result { use nexus_db_schema::schema::disk::dsl; - use nexus_db_schema::schema::disk_type_crucible::dsl as disk_type_crucible_dsl; let gen = disk.runtime().gen; let name = disk.name().clone(); @@ -364,9 +568,35 @@ impl DataStore { match &disk { Disk::Crucible(CrucibleDisk { disk: _, disk_type_crucible }) => { - diesel::insert_into(disk_type_crucible_dsl::disk_type_crucible) + use nexus_db_schema::schema::disk_type_crucible::dsl; + + diesel::insert_into(dsl::disk_type_crucible) .values(disk_type_crucible.clone()) - .on_conflict(disk_type_crucible_dsl::disk_id) + .on_conflict(dsl::disk_id) + .do_nothing() + .execute_async(conn) + .await?; + } + + Disk::LocalStorage(LocalStorageDisk { + disk, + disk_type_local_storage, + local_storage_dataset_allocation, + }) => { + if local_storage_dataset_allocation.is_some() { + // This allocation is currently only performed during + // instance allocation, return an error here. + return Err(err.bail(Error::internal_error(&format!( + "local storage disk {} has an allocation!", + disk.id(), + )))); + } + + use nexus_db_schema::schema::disk_type_local_storage::dsl; + + diesel::insert_into(dsl::disk_type_local_storage) + .values(disk_type_local_storage.clone()) + .on_conflict(dsl::disk_id) .do_nothing() .execute_async(conn) .await?; @@ -1194,7 +1424,6 @@ mod tests { use crate::db::pub_test_utils::TestDatabase; use nexus_types::external_api::params; - use omicron_common::api::external; use omicron_test_utils::dev; #[tokio::test] @@ -1224,14 +1453,17 @@ mod tests { .unwrap(); let disk_id = Uuid::new_v4(); - let create_params = params::DiskCreate { + + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }; + + let create_params = params::DiskCreate::Crucible { identity: external::IdentityMetadataCreateParams { name: "first-post".parse().unwrap(), description: "just trying things out".to_string(), }, - disk_source: 
params::DiskSource::Blank { - block_size: params::BlockSize::try_from(512).unwrap(), - }, + disk_source: disk_source.clone(), size: external::ByteCount::from(2147483648), }; @@ -1247,7 +1479,7 @@ mod tests { let disk_type_crucible = db::model::DiskTypeCrucible::new( disk_id, VolumeUuid::new_v4(), - &create_params, + &disk_source, ); let disk = db_datastore diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 354c58a0300..88ce4cd8c44 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -134,6 +134,7 @@ pub use db_metadata::ValidatedDatastoreSetupAction; pub use deployment::BlueprintLimitReachedOutput; pub use disk::CrucibleDisk; pub use disk::Disk; +pub use disk::LocalStorageDisk; pub use dns::DataStoreDnsTest; pub use dns::DnsVersionUpdateBuilder; pub use ereport::EreportFilters; @@ -941,22 +942,6 @@ mod test { .unwrap(); } - fn create_test_disk_create_params( - name: &str, - size: ByteCount, - ) -> params::DiskCreate { - params::DiskCreate { - identity: IdentityMetadataCreateParams { - name: Name::try_from(name.to_string()).unwrap(), - description: name.to_string(), - }, - disk_source: params::DiskSource::Blank { - block_size: params::BlockSize::try_from(4096).unwrap(), - }, - size, - } - } - #[derive(Debug)] pub(crate) struct TestDatasets { // eligible and ineligible aren't currently used, but are probably handy @@ -1163,10 +1148,10 @@ mod test { // Allocate regions from the datasets for this disk. Do it a few times // for good measure. 
for alloc_seed in 0..10 { - let params = create_test_disk_create_params( - &format!("disk{}", alloc_seed), - ByteCount::from_mebibytes_u32(1), - ); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; + let size = ByteCount::from_mebibytes_u32(1); let volume_id = VolumeUuid::new_v4(); let expected_region_count = REGION_REDUNDANCY_THRESHOLD; @@ -1174,8 +1159,8 @@ mod test { .disk_region_allocate( &opctx, volume_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &RegionAllocationStrategy::Random { seed: Some(alloc_seed), }, @@ -1222,7 +1207,7 @@ mod test { assert_eq!(ByteCount::from(4096), region.block_size()); let (_, extent_count) = DataStore::get_crucible_allocation( &BlockSize::AdvancedFormat, - params.size, + size, ); assert_eq!(extent_count, region.extent_count()); } @@ -1257,10 +1242,10 @@ mod test { // Allocate regions from the datasets for this disk. Do it a few times // for good measure. for alloc_seed in 0..10 { - let params = create_test_disk_create_params( - &format!("disk{}", alloc_seed), - ByteCount::from_mebibytes_u32(1), - ); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; + let size = ByteCount::from_mebibytes_u32(1); let volume_id = VolumeUuid::new_v4(); let expected_region_count = REGION_REDUNDANCY_THRESHOLD; @@ -1268,8 +1253,8 @@ mod test { .disk_region_allocate( &opctx, volume_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &&RegionAllocationStrategy::RandomWithDistinctSleds { seed: Some(alloc_seed), }, @@ -1311,7 +1296,7 @@ mod test { assert_eq!(ByteCount::from(4096), region.block_size()); let (_, extent_count) = DataStore::get_crucible_allocation( &BlockSize::AdvancedFormat, - params.size, + size, ); assert_eq!(extent_count, region.extent_count()); } @@ -1345,18 +1330,18 @@ mod test { // Allocate regions from the datasets for this disk. Do it a few times // for good measure. 
for alloc_seed in 0..10 { - let params = create_test_disk_create_params( - &format!("disk{}", alloc_seed), - ByteCount::from_mebibytes_u32(1), - ); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; + let size = ByteCount::from_mebibytes_u32(1); let volume_id = VolumeUuid::new_v4(); let err = datastore .disk_region_allocate( &opctx, volume_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &&RegionAllocationStrategy::RandomWithDistinctSleds { seed: Some(alloc_seed), }, @@ -1391,17 +1376,17 @@ mod test { .await; // Allocate regions from the datasets for this volume. - let params = create_test_disk_create_params( - "disk", - ByteCount::from_mebibytes_u32(500), - ); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; + let size = ByteCount::from_mebibytes_u32(500); let volume_id = VolumeUuid::new_v4(); let mut dataset_and_regions1 = datastore .disk_region_allocate( &opctx, volume_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await @@ -1413,8 +1398,8 @@ mod test { .disk_region_allocate( &opctx, volume_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &RegionAllocationStrategy::Random { seed: Some(1) }, ) .await @@ -1495,17 +1480,17 @@ mod test { .await; // Allocate regions from the datasets for this volume. 
- let params = create_test_disk_create_params( - "disk1", - ByteCount::from_mebibytes_u32(500), - ); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; + let size = ByteCount::from_mebibytes_u32(500); let volume1_id = VolumeUuid::new_v4(); let err = datastore .disk_region_allocate( &opctx, volume1_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await @@ -1527,8 +1512,8 @@ mod test { .disk_region_allocate( &opctx, volume1_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await @@ -1589,17 +1574,17 @@ mod test { .await; // Allocate regions from the datasets for this volume. - let params = create_test_disk_create_params( - "disk1", - ByteCount::from_mebibytes_u32(500), - ); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; + let size = ByteCount::from_mebibytes_u32(500); let volume1_id = VolumeUuid::new_v4(); let err = datastore .disk_region_allocate( &opctx, volume1_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await @@ -1681,10 +1666,10 @@ mod test { ]; let volume_id = VolumeUuid::new_v4(); - let params = create_test_disk_create_params( - "disk", - ByteCount::from_mebibytes_u32(500), - ); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; + let size = ByteCount::from_mebibytes_u32(500); for (policy, state, expected) in policy_state_combos { // Update policy/state only on a single physical disk. 
@@ -1707,8 +1692,8 @@ mod test { .disk_region_allocate( &opctx, volume_id, - ¶ms.disk_source, - params.size, + &disk_source, + size, &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await; @@ -1748,8 +1733,10 @@ mod test { .await; let disk_size = test_zpool_size(); + let disk_source = params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }; let alloc_size = ByteCount::try_from(disk_size.to_bytes() * 2).unwrap(); - let params = create_test_disk_create_params("disk1", alloc_size); let volume1_id = VolumeUuid::new_v4(); assert!( @@ -1757,8 +1744,8 @@ mod test { .disk_region_allocate( &opctx, volume1_id, - ¶ms.disk_source, - params.size, + &disk_source, + alloc_size, &RegionAllocationStrategy::Random { seed: Some(0) }, ) .await diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 8700c739972..c52e0e98384 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -39,7 +39,12 @@ table! { } } -allow_tables_to_appear_in_same_query!(disk, disk_type_crucible); +allow_tables_to_appear_in_same_query!( + disk, + disk_type_crucible, + disk_type_local_storage, +); + allow_tables_to_appear_in_same_query!(volume, disk_type_crucible); allow_tables_to_appear_in_same_query!( disk_type_crucible, @@ -2928,3 +2933,38 @@ table! { } allow_tables_to_appear_in_same_query!(fm_sitrep, fm_sitrep_history); + +table! { + disk_type_local_storage (disk_id) { + disk_id -> Uuid, + + required_dataset_overhead -> Int8, + + local_storage_dataset_allocation_id -> Nullable, + } +} + +table! 
{ + local_storage_dataset_allocation (id) { + id -> Uuid, + + time_created -> Timestamptz, + time_deleted -> Nullable, + + local_storage_dataset_id -> Uuid, + pool_id -> Uuid, + sled_id -> Uuid, + + dataset_size -> Int8, + } +} + +allow_tables_to_appear_in_same_query!( + disk_type_local_storage, + local_storage_dataset_allocation +); + +allow_tables_to_appear_in_same_query!( + rendezvous_local_storage_dataset, + local_storage_dataset_allocation +); diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index c6ed1a3bcac..2608c9aaf58 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -77,21 +77,22 @@ impl super::Nexus { self.db_datastore.disk_get(opctx, authz_disk.id()).await } - pub(super) async fn validate_disk_create_params( + pub(super) async fn validate_crucible_disk_create_params( self: &Arc, opctx: &OpContext, authz_project: &authz::Project, - params: ¶ms::DiskCreate, - ) -> Result<(), Error> { - let block_size: u64 = match params.disk_source { + disk_source: ¶ms::DiskSource, + size: ByteCount, + ) -> Result { + let block_size: u64 = match disk_source { params::DiskSource::Blank { block_size } | params::DiskSource::ImportingBlocks { block_size } => { - block_size.into() + (*block_size).into() } params::DiskSource::Snapshot { snapshot_id } => { let (.., db_snapshot) = LookupPath::new(opctx, &self.db_datastore) - .snapshot_id(snapshot_id) + .snapshot_id(*snapshot_id) .fetch() .await?; @@ -105,10 +106,10 @@ impl super::Nexus { // If the size of the snapshot is greater than the size of the // disk, return an error. 
- if db_snapshot.size.to_bytes() > params.size.to_bytes() { + if db_snapshot.size.to_bytes() > size.to_bytes() { return Err(Error::invalid_request(&format!( "disk size {} must be greater than or equal to snapshot size {}", - params.size.to_bytes(), + size.to_bytes(), db_snapshot.size.to_bytes(), ))); } @@ -117,7 +118,7 @@ impl super::Nexus { } params::DiskSource::Image { image_id } => { let (.., db_image) = LookupPath::new(opctx, &self.db_datastore) - .image_id(image_id) + .image_id(*image_id) .fetch() .await?; @@ -133,10 +134,10 @@ impl super::Nexus { // If the size of the image is greater than the size of the // disk, return an error. - if db_image.size.to_bytes() > params.size.to_bytes() { + if db_image.size.to_bytes() > size.to_bytes() { return Err(Error::invalid_request(&format!( "disk size {} must be greater than or equal to image size {}", - params.size.to_bytes(), + size.to_bytes(), db_image.size.to_bytes(), ))); } @@ -145,9 +146,35 @@ impl super::Nexus { } }; + Ok(block_size) + } + + pub(super) async fn validate_disk_create_params( + self: &Arc, + opctx: &OpContext, + authz_project: &authz::Project, + params: ¶ms::DiskCreate, + ) -> Result<(), Error> { + let block_size: u64 = match ¶ms { + params::DiskCreate::Crucible { disk_source, size, .. } => { + self.validate_crucible_disk_create_params( + opctx, + &authz_project, + &disk_source, + *size, + ) + .await? + } + + params::DiskCreate::LocalStorage { .. 
} => { + // All LocalStorage disks have a 4k block size + 4096 + } + }; + // Reject disks where the block size doesn't evenly divide the // total size - if (params.size.to_bytes() % block_size) != 0 { + if (params.size().to_bytes() % block_size) != 0 { return Err(Error::invalid_value( "size and block_size", format!( @@ -159,7 +186,7 @@ impl super::Nexus { // Reject disks where the size isn't at least // MIN_DISK_SIZE_BYTES - if params.size.to_bytes() < u64::from(MIN_DISK_SIZE_BYTES) { + if params.size().to_bytes() < u64::from(MIN_DISK_SIZE_BYTES) { return Err(Error::invalid_value( "size", format!( @@ -171,7 +198,7 @@ impl super::Nexus { // Reject disks where the MIN_DISK_SIZE_BYTES doesn't evenly // divide the size - if (params.size.to_bytes() % u64::from(MIN_DISK_SIZE_BYTES)) != 0 { + if (params.size().to_bytes() % u64::from(MIN_DISK_SIZE_BYTES)) != 0 { return Err(Error::invalid_value( "size", format!( @@ -181,15 +208,52 @@ impl super::Nexus { )); } - // Reject disks where the size is greated than MAX_DISK_SIZE_BYTES - if params.size.to_bytes() > MAX_DISK_SIZE_BYTES { - return Err(Error::invalid_value( - "size", - format!( - "total size must be less than {}", - ByteCount::try_from(MAX_DISK_SIZE_BYTES).unwrap() - ), - )); + // Check for disk type specific restrictions + match &params { + params::DiskCreate::Crucible { .. } => { + // Reject disks where the size is greater than + // MAX_DISK_SIZE_BYTES. This restriction will be changed or + // removed when multi-subvolume Volumes can be created by Nexus, + // or if the region allocation algorithm changes. + if params.size().to_bytes() > MAX_DISK_SIZE_BYTES { + return Err(Error::invalid_value( + "size", + format!( + "total size must be less than {}", + ByteCount::try_from(MAX_DISK_SIZE_BYTES).unwrap() + ), + )); + } + } + + params::DiskCreate::LocalStorage { .. 
} => { + // If a user requests some outlandish number of TB for local + // storage, and there isn't a sled allocation that can fulfill + // this, instance create will work but instance start (which + // performs the actual vmm + local storage dataset allocation) + // will fail. + // + // There's an opportunity to consult the latest inventory + // collection here, and if there aren't zpools that are as large + // as this local storage request, reject it. If the user + // mistakenly asks for 30 TB instead of 3 TB, such a check would + // prevent the instance start request from succeeding. Later, if + // new disks are inserted that have a much larger capacity, then + // the same instance create request could use that larger + // capacity. + // + // We don't want to reject creating a bunch of instances + // requesting local storage, where if all of them were started + // it wouldn't work, but the user's intention is not to ever + // start them all at the same time. But we also don't reserve or + // allocate any local storage here, so we can't prevent users + // from requesting too much local storage for the instances to + // ever start. + // + // TODO consult the latest inventory collection if it isn't too + // expensive, and reject local storage disk requests that are + // too large to be served by any sled. 
+ } } Ok(()) @@ -203,6 +267,7 @@ impl super::Nexus { ) -> CreateResult { let (.., authz_project) = project_lookup.lookup_for(authz::Action::CreateChild).await?; + self.validate_disk_create_params(opctx, &authz_project, params).await?; let saga_params = sagas::disk_create::Params { @@ -217,11 +282,11 @@ impl super::Nexus { .await?; let disk_created = saga_outputs - .lookup_node_output::("created_disk") + .lookup_node_output::("created_disk") .map_err(|e| Error::internal_error(&format!("{:#}", &e))) .internal_context("looking up output from disk create saga")?; - Ok(db::datastore::Disk::Crucible(disk_created)) + Ok(disk_created) } pub(crate) async fn disk_list( @@ -339,6 +404,13 @@ impl super::Nexus { self.volume_remove_read_only_parent(&opctx, disk.volume_id()) .await?; } + + datastore::Disk::LocalStorage(_) => { + return Err(Error::internal_error(&format!( + "cannot remove rop for local storage disk {}", + disk.id() + ))); + } } Ok(()) @@ -361,6 +433,13 @@ impl super::Nexus { db::model::DiskType::Crucible => { // ok } + + db::model::DiskType::LocalStorage => { + return Err(Error::internal_error(&format!( + "cannot import to local storage disk {}", + authz_disk.id() + ))); + } } let disk_state: DiskState = db_disk.state().into(); @@ -402,6 +481,13 @@ impl super::Nexus { let disk = match self.datastore().disk_get(opctx, authz_disk.id()).await? 
{ db::datastore::Disk::Crucible(disk) => disk, + + db::datastore::Disk::LocalStorage(_) => { + return Err(Error::internal_error(&format!( + "cannot import to local storage disk {}", + authz_disk.id() + ))); + } }; let disk_state: DiskState = disk.state().into(); @@ -531,6 +617,13 @@ impl super::Nexus { db::model::DiskType::Crucible => { // ok } + + db::model::DiskType::LocalStorage => { + return Err(Error::internal_error(&format!( + "cannot import to local storage disk {}", + authz_disk.id() + ))); + } } let disk_state: DiskState = db_disk.state().into(); @@ -572,6 +665,13 @@ impl super::Nexus { let disk: datastore::CrucibleDisk = match self.datastore().disk_get(&opctx, authz_disk.id()).await? { datastore::Disk::Crucible(disk) => disk, + + datastore::Disk::LocalStorage(_) => { + return Err(Error::internal_error(&format!( + "cannot finalize local storage disk {}", + authz_disk.id() + ))); + } }; let saga_params = sagas::finalize_disk::Params { diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 098c289eb9f..1922e7eabf1 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -569,18 +569,21 @@ impl super::Nexus { MAX_DISKS_PER_INSTANCE ))); } + for disk in all_disks.iter() { if let params::InstanceDiskAttachment::Create(create) = disk { self.validate_disk_create_params(opctx, &authz_project, create) .await?; } } + if params.external_ips.len() > MAX_EXTERNAL_IPS_PER_INSTANCE { return Err(Error::invalid_request(&format!( "An instance may not have more than {} external IP addresses", MAX_EXTERNAL_IPS_PER_INSTANCE, ))); } + if params .external_ips .iter() @@ -593,6 +596,7 @@ impl super::Nexus { MAX_EPHEMERAL_IPS_PER_INSTANCE, ))); } + if let params::InstanceNetworkInterfaceAttachment::Create(ref ifaces) = params.network_interfaces { @@ -780,6 +784,32 @@ impl super::Nexus { Ok(()) } + /// Returns true if any of the attached disks are type LocalStorage + pub(crate) async fn instance_uses_local_storage( + self: &Arc, + opctx: 
&OpContext, + authz_instance: &authz::Instance, + ) -> Result { + let disks = self + .db_datastore + .instance_list_disks( + opctx, + authz_instance, + &PaginatedBy::Name(DataPageParams { + marker: None, + direction: dropshot::PaginationOrder::Ascending, + limit: std::num::NonZeroU32::new(MAX_DISKS_PER_INSTANCE) + .unwrap(), + }), + ) + .await?; + + Ok(disks.into_iter().any(|disk| match disk { + db::datastore::Disk::LocalStorage(_) => true, + db::datastore::Disk::Crucible(_) => false, + })) + } + pub(crate) async fn instance_migrate( self: &Arc, opctx: &OpContext, @@ -791,6 +821,14 @@ impl super::Nexus { .lookup_for(authz::Action::Modify) .await?; + // Cannot migrate instance if it has local storage + if self.instance_uses_local_storage(opctx, &authz_instance).await? { + return Err(Error::invalid_request(format!( + "cannot migrate {} as it uses local storage", + authz_instance.id() + ))); + } + let state = self .db_datastore .instance_fetch_with_vmm(opctx, &authz_instance) diff --git a/nexus/src/app/instance_platform/mod.rs b/nexus/src/app/instance_platform/mod.rs index 585557defa6..0b79ad2ed38 100644 --- a/nexus/src/app/instance_platform/mod.rs +++ b/nexus/src/app/instance_platform/mod.rs @@ -257,7 +257,6 @@ impl DisksByIdBuilder { self.add_generic_disk(disk, backend) } - #[allow(unused)] // for now! fn add_file_backed_disk( &mut self, disk: &Disk, @@ -491,6 +490,15 @@ impl super::Nexus { builder.add_crucible_disk(disk, &volume)?; } + + db::datastore::Disk::LocalStorage(local_storage_disk) => { + builder.add_file_backed_disk( + disk, + // Use the delegated zvol as the target for the file + // backed disk + local_storage_disk.zvol_path()?, + )?; + } } } @@ -503,9 +511,6 @@ impl super::Nexus { // module's selected device name for the appropriate disk. if let Some(boot_disk_id) = instance.boot_disk_id { if let Some(disk) = disks.0.get(&boot_disk_id) { - // XXX here is where we would restrict the type of disk used for - // a boot disk. should we? 
- let entry = BootOrderEntry { id: SpecKey::Name(disk.device_name.clone()), }; diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index ab2bcce8da1..edd7bc0cceb 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -41,8 +41,12 @@ pub(crate) struct Params { declare_saga_actions! { disk_create; - CREATE_DISK_RECORD -> "created_disk" { - + sdc_create_disk_record + CREATE_CRUCIBLE_DISK_RECORD -> "crucible_disk" { + + sdc_create_crucible_disk_record + - sdc_create_disk_record_undo + } + CREATE_LOCAL_STORAGE_DISK_RECORD -> "local_storage_disk" { + + sdc_create_local_storage_disk_record - sdc_create_disk_record_undo } REGIONS_ALLOC -> "datasets_and_regions" { @@ -64,7 +68,7 @@ declare_saga_actions! { + sdc_create_volume_record - sdc_create_volume_record_undo } - FINALIZE_DISK_RECORD -> "disk_runtime" { + FINALIZE_DISK_RECORD -> "created_disk" { + sdc_finalize_disk_record } GET_PANTRY_ADDRESS -> "pantry_address" { @@ -104,21 +108,39 @@ impl NexusSaga for SagaDiskCreate { ACTION_GENERATE_ID.as_ref(), )); - builder.append(create_disk_record_action()); - builder.append(regions_alloc_action()); builder.append(space_account_action()); - builder.append(regions_ensure_undo_action()); - builder.append(regions_ensure_action()); - builder.append(create_volume_record_action()); + + match ¶ms.create_params { + params::DiskCreate::Crucible { .. } => { + builder.append(create_crucible_disk_record_action()); + builder.append(regions_alloc_action()); + builder.append(regions_ensure_undo_action()); + builder.append(regions_ensure_action()); + builder.append(create_volume_record_action()); + } + + params::DiskCreate::LocalStorage { .. } => { + builder.append(create_local_storage_disk_record_action()); + } + } + builder.append(finalize_disk_record_action()); - match ¶ms.create_params.disk_source { - params::DiskSource::ImportingBlocks { .. 
} => { - builder.append(get_pantry_address_action()); - builder.append(call_pantry_attach_for_disk_action()); + match ¶ms.create_params { + params::DiskCreate::Crucible { disk_source, .. } => { + match disk_source { + params::DiskSource::ImportingBlocks { .. } => { + builder.append(get_pantry_address_action()); + builder.append(call_pantry_attach_for_disk_action()); + } + + _ => {} + } } - _ => {} + params::DiskCreate::LocalStorage { .. } => { + // nothing to do! + } } Ok(builder.build()?) @@ -127,48 +149,43 @@ impl NexusSaga for SagaDiskCreate { // disk create saga: action implementations -async fn sdc_create_disk_record( +async fn sdc_create_crucible_disk_record( sagactx: NexusActionContext, ) -> Result { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; let disk_id = sagactx.lookup::("disk_id")?; - // We admittedly reference the volume before it has been allocated, - // but this should be acceptable because the disk remains in a "Creating" - // state until the saga has completed. - let volume_id = sagactx.lookup::("volume_id")?; + let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, ); - let block_size: db::model::BlockSize = - match ¶ms.create_params.disk_source { - params::DiskSource::Blank { block_size } => { - db::model::BlockSize::try_from(*block_size).map_err(|e| { - ActionError::action_failed(Error::internal_error( - &e.to_string(), - )) - })? - } - params::DiskSource::Snapshot { snapshot_id } => { - let (.., db_snapshot) = - LookupPath::new(&opctx, osagactx.datastore()) - .snapshot_id(*snapshot_id) - .fetch() - .await - .map_err(|e| { - ActionError::action_failed(Error::internal_error( - &e.to_string(), - )) - })?; + let disk_source = match ¶ms.create_params { + params::DiskCreate::Crucible { disk_source, .. 
} => disk_source, - db_snapshot.block_size - } - params::DiskSource::Image { image_id } => { - let (.., image) = LookupPath::new(&opctx, osagactx.datastore()) - .image_id(*image_id) + params::DiskCreate::LocalStorage { .. } => { + // This should be unreachable given the match performed in + // `make_saga_dag`! + return Err(ActionError::action_failed(Error::internal_error( + "wrong DiskCreate variant!", + ))); + } + }; + + let block_size: db::model::BlockSize = match &disk_source { + params::DiskSource::Blank { block_size } => { + db::model::BlockSize::try_from(*block_size).map_err(|e| { + ActionError::action_failed(Error::internal_error( + &e.to_string(), + )) + })? + } + params::DiskSource::Snapshot { snapshot_id } => { + let (.., db_snapshot) = + LookupPath::new(&opctx, osagactx.datastore()) + .snapshot_id(*snapshot_id) .fetch() .await .map_err(|e| { @@ -177,16 +194,29 @@ async fn sdc_create_disk_record( )) })?; - image.block_size - } - params::DiskSource::ImportingBlocks { block_size } => { - db::model::BlockSize::try_from(*block_size).map_err(|e| { + db_snapshot.block_size + } + params::DiskSource::Image { image_id } => { + let (.., image) = LookupPath::new(&opctx, osagactx.datastore()) + .image_id(*image_id) + .fetch() + .await + .map_err(|e| { ActionError::action_failed(Error::internal_error( &e.to_string(), )) - })? - } - }; + })?; + + image.block_size + } + params::DiskSource::ImportingBlocks { block_size } => { + db::model::BlockSize::try_from(*block_size).map_err(|e| { + ActionError::action_failed(Error::internal_error( + &e.to_string(), + )) + })? + } + }; let disk = db::model::Disk::new( disk_id, @@ -199,8 +229,11 @@ async fn sdc_create_disk_record( let disk_type_crucible = db::model::DiskTypeCrucible::new( disk_id, - volume_id, - ¶ms.create_params, + // We admittedly reference the volume before it has been allocated, but + // this should be acceptable because the disk remains in a "Creating" + // state until the saga has completed. 
+ sagactx.lookup::("volume_id")?, + &disk_source, ); let crucible_disk = @@ -231,6 +264,7 @@ async fn sdc_create_disk_record_undo( let osagactx = sagactx.user_data(); let disk_id = sagactx.lookup::("disk_id")?; + osagactx .datastore() .project_delete_disk_no_auth( @@ -238,9 +272,75 @@ async fn sdc_create_disk_record_undo( &[DiskState::Detached, DiskState::Faulted, DiskState::Creating], ) .await?; + Ok(()) } +async fn sdc_create_local_storage_disk_record( + sagactx: NexusActionContext, +) -> Result { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let disk_id = sagactx.lookup::("disk_id")?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let block_size = match ¶ms.create_params { + params::DiskCreate::Crucible { .. } => { + // This should be unreachable given the match performed in + // `make_saga_dag`! + return Err(ActionError::action_failed(Error::internal_error( + "wrong DiskCreate variant!", + ))); + } + + params::DiskCreate::LocalStorage { .. 
} => { + // All LocalStorage disks have a block size of 4k + db::model::BlockSize::AdvancedFormat + } + }; + + let disk = db::model::Disk::new( + disk_id, + params.project_id, + ¶ms.create_params, + block_size, + db::model::DiskRuntimeState::new(), + db::model::DiskType::LocalStorage, + ); + + let disk_type_local_storage = db::model::DiskTypeLocalStorage::new( + disk_id, + params.create_params.size(), + ) + .map_err(ActionError::action_failed)?; + + let local_storage_disk = + db::datastore::LocalStorageDisk::new(disk, disk_type_local_storage); + + let (.., authz_project) = LookupPath::new(&opctx, osagactx.datastore()) + .project_id(params.project_id) + .lookup_for(authz::Action::CreateChild) + .await + .map_err(ActionError::action_failed)?; + + osagactx + .datastore() + .project_create_disk( + &opctx, + &authz_project, + db::datastore::Disk::LocalStorage(local_storage_disk.clone()), + ) + .await + .map_err(ActionError::action_failed)?; + + Ok(local_storage_disk) +} + async fn sdc_alloc_regions( sagactx: NexusActionContext, ) -> Result, ActionError> { @@ -266,13 +366,25 @@ async fn sdc_alloc_regions( let strategy = &osagactx.nexus().default_region_allocation_strategy; + let disk_source = match ¶ms.create_params { + params::DiskCreate::Crucible { disk_source, .. } => disk_source, + + params::DiskCreate::LocalStorage { .. } => { + // This should be unreachable given the match performed in + // `make_saga_dag`! 
+ return Err(ActionError::action_failed(Error::internal_error( + "wrong DiskCreate variant!", + ))); + } + }; + let datasets_and_regions = osagactx .datastore() .disk_region_allocate( &opctx, volume_id, - ¶ms.create_params.disk_source, - params.create_params.size, + &disk_source, + params.create_params.size(), &strategy, ) .await @@ -304,19 +416,18 @@ async fn sdc_account_space( let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let disk_created = - sagactx.lookup::("created_disk")?; let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, ); + let disk_id = sagactx.lookup::("disk_id")?; osagactx .datastore() .virtual_provisioning_collection_insert_disk( &opctx, - disk_created.id(), + disk_id, params.project_id, - disk_created.size(), + params.create_params.size().into(), ) .await .map_err(ActionError::action_failed)?; @@ -329,19 +440,18 @@ async fn sdc_account_space_undo( let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let disk_created = - sagactx.lookup::("created_disk")?; let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, ); + let disk_id = sagactx.lookup::("disk_id")?; osagactx .datastore() .virtual_provisioning_collection_delete_disk( &opctx, - disk_created.id(), + disk_id, params.project_id, - disk_created.size(), + params.create_params.size().into(), ) .await .map_err(ActionError::action_failed)?; @@ -384,8 +494,20 @@ async fn sdc_regions_ensure( ¶ms.serialized_authn, ); + let disk_source = match ¶ms.create_params { + params::DiskCreate::Crucible { disk_source, .. } => disk_source, + + params::DiskCreate::LocalStorage { .. } => { + // This should be unreachable given the match performed in + // `make_saga_dag`! 
+ return Err(ActionError::action_failed(Error::internal_error( + "wrong DiskCreate variant!", + ))); + } + }; + let mut read_only_parent: Option> = - match ¶ms.create_params.disk_source { + match disk_source { params::DiskSource::Blank { block_size: _ } => None, params::DiskSource::Snapshot { snapshot_id } => { debug!(log, "grabbing snapshot {}", snapshot_id); @@ -646,7 +768,7 @@ async fn sdc_create_volume_record_undo( async fn sdc_finalize_disk_record( sagactx: NexusActionContext, -) -> Result<(), ActionError> { +) -> Result { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; let datastore = osagactx.datastore(); @@ -656,37 +778,65 @@ async fn sdc_finalize_disk_record( ); let disk_id = sagactx.lookup::("disk_id")?; - let disk_created = - sagactx.lookup::("created_disk")?; + let (.., authz_disk) = LookupPath::new(&opctx, datastore) .disk_id(disk_id) .lookup_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; - // TODO-security Review whether this can ever fail an authz check. We don't - // want this to ever fail the authz check here -- if it did, we would have - // wanted to catch that a lot sooner. It wouldn't make sense for it to fail - // anyway because we're modifying something that *we* just created. Right - // now, it's very unlikely that it would ever fail because we checked - // Action::CreateChild on the Project before we created this saga. The only - // role that gets that permission is "project collaborator", which also gets - // Action::Modify on Disks within the Project. So this shouldn't break in - // practice. However, that's brittle. It would be better if this were - // better guaranteed. 
- match ¶ms.create_params.disk_source { - params::DiskSource::ImportingBlocks { block_size: _ } => { - datastore - .disk_update_runtime( - &opctx, - &authz_disk, - &disk_created.runtime().import_ready(), - ) - .await - .map_err(ActionError::action_failed)?; + // TODO-security Review whether further actions in this node can ever fail + // an authz check. We don't want this to ever fail the authz check here -- + // if it did, we would have wanted to catch that a lot sooner. It wouldn't + // make sense for it to fail anyway because we're modifying something that + // *we* just created. Right now, it's very unlikely that it would ever fail + // because we checked Action::CreateChild on the Project before we created + // this saga. The only role that gets that permission is "project + // collaborator", which also gets Action::Modify on Disks within the + // Project. So this shouldn't break in practice. However, that's brittle. + // It would be better if this were better guaranteed. + + match params.create_params { + params::DiskCreate::Crucible { disk_source, .. } => { + let disk_created = db::datastore::Disk::Crucible( + sagactx + .lookup::("crucible_disk")?, + ); + + match disk_source { + params::DiskSource::ImportingBlocks { .. } => { + datastore + .disk_update_runtime( + &opctx, + &authz_disk, + &disk_created.runtime().import_ready(), + ) + .await + .map_err(ActionError::action_failed)?; + } + + _ => { + datastore + .disk_update_runtime( + &opctx, + &authz_disk, + &disk_created.runtime().detach(), + ) + .await + .map_err(ActionError::action_failed)?; + } + } + + Ok(disk_created) } - _ => { + params::DiskCreate::LocalStorage { .. 
} => { + let disk_created = db::datastore::Disk::LocalStorage( + sagactx.lookup::( + "local_storage_disk", + )?, + ); + datastore .disk_update_runtime( &opctx, @@ -695,10 +845,10 @@ async fn sdc_finalize_disk_record( ) .await .map_err(ActionError::action_failed)?; + + Ok(disk_created) } } - - Ok(()) } async fn sdc_get_pantry_address( @@ -749,7 +899,7 @@ async fn sdc_call_pantry_attach_for_disk( ¶ms.serialized_authn, ); let disk_created = - sagactx.lookup::("created_disk")?; + sagactx.lookup::("crucible_disk")?; let pantry_address = sagactx.lookup::("pantry_address")?; @@ -862,7 +1012,7 @@ pub(crate) mod test { const PROJECT_NAME: &str = "springfield-squidport"; pub fn new_disk_create_params() -> params::DiskCreate { - params::DiskCreate { + params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().expect("Invalid disk name"), description: "My disk".to_string(), @@ -907,7 +1057,7 @@ pub(crate) mod test { let output = nexus.sagas.saga_execute::(params).await.unwrap(); let disk = output - .lookup_node_output::("created_disk") + .lookup_node_output::("created_disk") .unwrap(); assert_eq!(disk.project_id(), project_id); } diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index f6fe68151fe..07a9f22a267 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -94,6 +94,11 @@ impl NexusSaga for SagaDiskDelete { "params_for_volume_delete_subsaga", )); } + + datastore::Disk::LocalStorage(_) => { + // XXX nothing to do here yet, need the delegate_zvol branch to + // merge first. + } } Ok(builder.build()?) diff --git a/nexus/src/app/sagas/finalize_disk.rs b/nexus/src/app/sagas/finalize_disk.rs index 98e85c3faa5..4cee213724a 100644 --- a/nexus/src/app/sagas/finalize_disk.rs +++ b/nexus/src/app/sagas/finalize_disk.rs @@ -276,6 +276,12 @@ async fn sfd_get_pantry_address( .map_err(ActionError::action_failed)? 
{ datastore::Disk::Crucible(disk) => disk, + + datastore::Disk::LocalStorage(_) => { + // This is unreachable because the saga only accepts a CrucibleDisk, + // and disks cannot ever change type. + unreachable!(); + } }; // At any stage of executing this saga, if the disk moves from state diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 43afdd9430f..cdeb344e307 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1201,7 +1201,7 @@ async fn ensure_instance_disk_attach_state( let disk_name = match params.attach_params { InstanceDiskAttachment::Create(create_params) => { - db::model::Name(create_params.identity.name) + db::model::Name(create_params.identity().name.clone()) } InstanceDiskAttachment::Attach(attach_params) => { db::model::Name(attach_params.name) diff --git a/nexus/src/app/sagas/region_replacement_start.rs b/nexus/src/app/sagas/region_replacement_start.rs index 3931fd6faae..e9038addc5e 100644 --- a/nexus/src/app/sagas/region_replacement_start.rs +++ b/nexus/src/app/sagas/region_replacement_start.rs @@ -821,7 +821,10 @@ pub(crate) mod test { let disk_id = disk.identity.id; let Disk::Crucible(disk) = - datastore.disk_get(&opctx, disk_id).await.unwrap(); + datastore.disk_get(&opctx, disk_id).await.unwrap() + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(disk.volume_id()).await.unwrap(); @@ -1136,7 +1139,10 @@ pub(crate) mod test { let disk_id = disk.identity.id; let Disk::Crucible(disk) = - datastore.disk_get(&opctx, disk_id).await.unwrap(); + datastore.disk_get(&opctx, disk_id).await.unwrap() + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(disk.volume_id()).await.unwrap(); @@ -1209,7 +1215,10 @@ pub(crate) mod test { let disk_id = disk.identity.id; let Disk::Crucible(disk) = - datastore.disk_get(&opctx, disk_id).await.unwrap(); + datastore.disk_get(&opctx, disk_id).await.unwrap() + 
else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(disk.volume_id()).await.unwrap(); diff --git a/nexus/src/app/sagas/region_snapshot_replacement_start.rs b/nexus/src/app/sagas/region_snapshot_replacement_start.rs index 285b05376e2..a834e37d4a2 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_start.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_start.rs @@ -1277,7 +1277,10 @@ pub(crate) mod test { let disk_id = disk.identity.id; let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk_id).await.unwrap(); + datastore.disk_get(&opctx, disk_id).await.unwrap() + else { + unreachable!() + }; // Create a snapshot let snapshot = @@ -1858,7 +1861,10 @@ pub(crate) mod test { // Before expunging any physical disk, save some DB models let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) @@ -2013,7 +2019,10 @@ pub(crate) mod test { // Before expunging any physical disk, save some DB models let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index fd6035bd439..7eb101ebcec 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -921,6 +921,12 @@ async fn ssc_get_pantry_address( .map_err(ActionError::action_failed)? { db::datastore::Disk::Crucible(disk) => disk, + + db::datastore::Disk::LocalStorage(_) => { + // This is unreachable because the saga only accepts a CrucibleDisk, + // and disks cannot ever change type. 
+ unreachable!(); + } }; let pantry_address = if let Some(pantry_address) = disk.pantry_address() { @@ -2013,7 +2019,10 @@ mod test { .expect("Failed to look up created disk"); let Disk::Crucible(disk) = - nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap() + else { + unreachable!() + }; let silo_id = authz_silo.id(); let project_id = authz_project.id(); @@ -2241,7 +2250,10 @@ mod test { .datastore() .disk_get(&opctx, disk_id) .await - .unwrap(); + .unwrap() + else { + unreachable!() + }; // If the pantry isn't being used, make sure the disk is // attached. Note that under normal circumstances, a @@ -2378,7 +2390,10 @@ mod test { .expect("Failed to look up created disk"); let Disk::Crucible(disk) = - nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap() + else { + unreachable!() + }; let silo_id = authz_silo.id(); let project_id = authz_project.id(); @@ -2435,7 +2450,10 @@ mod test { .expect("Failed to look up created disk"); let Disk::Crucible(disk) = - nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap() + else { + unreachable!() + }; assert!( nexus @@ -2493,7 +2511,10 @@ mod test { .expect("Failed to look up created disk"); let Disk::Crucible(disk) = - nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap() + else { + unreachable!() + }; let silo_id = authz_silo.id(); let project_id = authz_project.id(); diff --git a/nexus/src/app/snapshot.rs b/nexus/src/app/snapshot.rs index 355e5290aec..03b6f48aa55 100644 --- a/nexus/src/app/snapshot.rs +++ b/nexus/src/app/snapshot.rs @@ -100,6 +100,12 @@ impl super::Nexus { let disk: datastore::CrucibleDisk = match self.datastore().disk_get(&opctx, authz_disk.id()).await? 
{ datastore::Disk::Crucible(disk) => disk, + + datastore::Disk::LocalStorage(_) => { + return Err(Error::invalid_request( + "can't create a snapshot of a local storage disk", + )); + } }; // If there isn't a running propolis, Nexus needs to use the Crucible diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index b15d673ef99..2e0d69430cc 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -538,6 +538,7 @@ pub async fn create_project( .await } +/// Create a regular Crucible disk pub async fn create_disk( client: &ClientTestContext, project_name: &str, @@ -547,7 +548,7 @@ pub async fn create_disk( object_create( client, &url, - ¶ms::DiskCreate { + ¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: disk_name.parse().unwrap(), description: String::from("sells rainsticks"), @@ -571,7 +572,7 @@ pub async fn create_disk_from_snapshot( object_create( client, &url, - ¶ms::DiskCreate { + ¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: disk_name.parse().unwrap(), description: String::from("sells rainsticks"), @@ -1548,9 +1549,27 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { /// /// Does not inform sled agents to use these pools. /// - /// See: [Self::propagate_datasets_to_sleds] if you want to send - /// this configuration to a simulated sled agent. + /// See: [Self::propagate_datasets_to_sleds] if you want to send this + /// configuration to a simulated sled agent. pub async fn add_zpool_with_datasets(&mut self, sled_id: SledUuid) { + self.add_sized_zpool_with_datasets( + sled_id, + Self::DEFAULT_ZPOOL_SIZE_GIB, + ) + .await + } + + /// Adds the zpool (of arbitrary size) and datasets into the database. + /// + /// Does not inform sled agents to use these pools. + /// + /// See: [Self::propagate_datasets_to_sleds] if you want to send this + /// configuration to a simulated sled agent. 
+ pub async fn add_sized_zpool_with_datasets( + &mut self, + sled_id: SledUuid, + gibibytes: u32, + ) { self.add_zpool_with_datasets_ext( sled_id, PhysicalDiskUuid::new_v4(), @@ -1565,7 +1584,7 @@ impl<'a, N: NexusServer> DiskTest<'a, N> { kind: DatasetKind::Debug, }, ], - Self::DEFAULT_ZPOOL_SIZE_GIB, + gibibytes, ) .await } diff --git a/nexus/tests/integration_tests/crucible_replacements.rs b/nexus/tests/integration_tests/crucible_replacements.rs index 574f633afd8..a6f4a8e89a2 100644 --- a/nexus/tests/integration_tests/crucible_replacements.rs +++ b/nexus/tests/integration_tests/crucible_replacements.rs @@ -237,7 +237,10 @@ async fn test_region_replacement_does_not_create_freed_region( // Before expunging the physical disk, save the DB model let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); @@ -330,7 +333,10 @@ mod region_replacement { // allocated region of that disk let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); @@ -787,7 +793,10 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( // Before deleting the disk, save the DB model let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); @@ -1357,7 +1366,10 @@ mod region_snapshot_replacement { // first allocated region of that disk let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); @@ -1429,7 
+1441,10 @@ mod region_snapshot_replacement { let Disk::Crucible(db_disk_from_snapshot) = datastore .disk_get(&opctx, disk_from_snapshot.identity.id) .await - .unwrap(); + .unwrap() + else { + unreachable!() + }; assert!(volumes_set.contains(&db_snapshot.volume_id())); assert!(volumes_set.contains(&db_disk_from_snapshot.volume_id())); @@ -1670,7 +1685,10 @@ mod region_snapshot_replacement { .datastore .disk_get(&self.opctx(), disk_from_snapshot.identity.id) .await - .unwrap(); + .unwrap() + else { + unreachable!() + }; let result = self .datastore @@ -2057,7 +2075,10 @@ async fn test_replacement_sanity(cptestctx: &ControlPlaneTestContext) { // Before expunging the physical disk, save the DB model let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); @@ -2165,7 +2186,10 @@ async fn test_region_replacement_triple_sanity( // Before expunging any physical disk, save some DB models let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) @@ -2288,7 +2312,10 @@ async fn test_region_replacement_triple_sanity_2( // Before expunging any physical disk, save some DB models let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) @@ -2447,7 +2474,10 @@ async fn test_replacement_sanity_twice(cptestctx: &ControlPlaneTestContext) { // snapshot. 
let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); @@ -2551,7 +2581,10 @@ async fn test_read_only_replacement_sanity( // snapshot. let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); @@ -2716,7 +2749,10 @@ async fn test_replacement_sanity_twice_after_snapshot_delete( // snapshot. let Disk::Crucible(db_disk) = - datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); + datastore.disk_get(&opctx, disk.identity.id).await.unwrap() + else { + unreachable!() + }; assert_eq!(db_disk.id(), disk.identity.id); diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index de2d32e52c4..ab7176691aa 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -128,6 +128,10 @@ async fn get_crucible_disk( match disk { datastore::Disk::Crucible(disk) => disk, + + datastore::Disk::LocalStorage(_) => { + unreachable!(); + } } } @@ -351,7 +355,7 @@ async fn test_disk_create_disk_that_already_exists_fails( let disks_url = get_disks_url(); // Create a disk. 
- let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -755,7 +759,7 @@ async fn test_disk_region_creation_failure( ); let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -806,7 +810,7 @@ async fn test_disk_invalid_block_size_rejected( let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -849,7 +853,7 @@ async fn test_disk_reject_total_size_not_divisible_by_block_size( ); let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -883,7 +887,7 @@ async fn test_disk_reject_total_size_less_than_min_disk_size_bytes( // Attempt to allocate the disk, observe a server error. let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -926,7 +930,7 @@ async fn test_disk_reject_total_size_greater_than_max_disk_size_bytes( // Atempt to allocate the disk, observe a server error. 
let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -970,7 +974,7 @@ async fn test_disk_reject_total_size_not_divisible_by_min_disk_size( // Attempt to allocate the disk, observe a server error. let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -1020,7 +1024,7 @@ async fn test_disk_backed_by_multiple_region_sets( // Ask for a 20 gibibyte disk. let disk_size = ByteCount::from_gibibytes_u32(20); let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -1056,7 +1060,7 @@ async fn test_disk_too_big(cptestctx: &ControlPlaneTestContext) { // Ask for a 300 gibibyte disk (but only 16 is available) let disk_size = ByteCount::from_gibibytes_u32(300); let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -1143,7 +1147,7 @@ async fn test_disk_virtual_provisioning_collection( // in which it was allocated let disk_size = ByteCount::from_gibibytes_u32(1); let disks_url = get_disks_url(); - let disk_one = params::DiskCreate { + let disk_one = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-one".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1202,7 +1206,7 @@ async fn test_disk_virtual_provisioning_collection( // Each project should be using "one disk" of real storage, but the 
org // should be using both. let disks_url = format!("/v1/disks?project={}", PROJECT_NAME_2); - let disk_one = params::DiskCreate { + let disk_one = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-two".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1304,7 +1308,7 @@ async fn test_disk_virtual_provisioning_collection_failed_delete( // Create a 1 GB disk let disk_size = ByteCount::from_gibibytes_u32(1); let disks_url = get_disks_url(); - let disk_one = params::DiskCreate { + let disk_one = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-one".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1438,7 +1442,7 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { // Create a 1 GB disk let disk_size = ByteCount::from_gibibytes_u32(1); let disks_url = get_disks_url(); - let disk_one = params::DiskCreate { + let disk_one = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-one".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1578,7 +1582,7 @@ async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { let disk_size = ByteCount::from_gibibytes_u32(7); let disks_url = get_disks_url(); - let disk_one = params::DiskCreate { + let disk_one = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-one".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1613,7 +1617,7 @@ async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { // Ask for a 6 gibibyte disk, this should fail because there isn't space // available. 
let disk_size = ByteCount::from_gibibytes_u32(6); - let disk_two = params::DiskCreate { + let disk_two = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-two".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1665,7 +1669,7 @@ async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { // Ask for a 10 gibibyte disk. let disk_size = ByteCount::from_gibibytes_u32(10); - let disk_three = params::DiskCreate { + let disk_three = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-three".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1716,7 +1720,7 @@ async fn test_multiple_disks_multiple_zpools( let disk_size = ByteCount::from_gibibytes_u32(10); let disks_url = get_disks_url(); - let disk_one = params::DiskCreate { + let disk_one = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-one".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1739,7 +1743,7 @@ async fn test_multiple_disks_multiple_zpools( // Ask for another 10 gibibyte disk let disk_size = ByteCount::from_gibibytes_u32(10); - let disk_two = params::DiskCreate { + let disk_two = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk-two".parse().unwrap(), description: String::from("sells rainsticks"), @@ -1768,7 +1772,7 @@ async fn test_disk_create_for_importing(cptestctx: &ControlPlaneTestContext) { create_project_and_pool(client).await; let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -1813,7 +1817,7 @@ async fn test_project_delete_disk_no_auth_idempotent( // Create a disk let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: 
IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -2441,7 +2445,7 @@ async fn test_do_not_provision_on_dataset_not_enough( let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -2506,7 +2510,7 @@ async fn test_zpool_control_plane_storage_buffer( let disks_url = get_disks_url(); // Creating a 8G disk will work (10G size used due to reservation overhead) - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk1".parse().unwrap(), description: String::from("sells rainsticks"), @@ -2529,7 +2533,7 @@ async fn test_zpool_control_plane_storage_buffer( // Creating a 4G disk will also work (5G size used due to reservation // overhead plus the previous 10G size used is less than 16G) - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk2".parse().unwrap(), description: String::from("sells rainsticks"), diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index eca4777fbbf..964be64e966 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -388,7 +388,7 @@ pub static DEMO_DISK_URL: LazyLock = LazyLock::new(|| { }); pub static DEMO_DISK_CREATE: LazyLock = LazyLock::new(|| { - params::DiskCreate { + params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DEMO_DISK_NAME.clone(), description: "".parse().unwrap(), @@ -408,7 +408,7 @@ pub static DEMO_IMPORT_DISK_NAME: LazyLock = LazyLock::new(|| "demo-import-disk".parse().unwrap()); pub static DEMO_IMPORT_DISK_CREATE: LazyLock = LazyLock::new(|| { - params::DiskCreate { + 
params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DEMO_IMPORT_DISK_NAME.clone(), description: "".parse().unwrap(), diff --git a/nexus/tests/integration_tests/images.rs b/nexus/tests/integration_tests/images.rs index 998a4283b18..a4ea9cedf15 100644 --- a/nexus/tests/integration_tests/images.rs +++ b/nexus/tests/integration_tests/images.rs @@ -156,7 +156,7 @@ async fn test_make_disk_from_image(cptestctx: &ControlPlaneTestContext) { .execute_and_parse_unwrap::() .await; - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk".parse().unwrap(), description: String::from("sells rainsticks"), @@ -195,7 +195,7 @@ async fn test_make_disk_from_other_project_image_fails( .execute_and_parse_unwrap::() .await; - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "stolen-disk".parse().unwrap(), description: String::from("yoink"), @@ -244,7 +244,7 @@ async fn test_make_disk_from_image_too_small( .execute_and_parse_unwrap::() .await; - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk".parse().unwrap(), description: String::from("sells rainsticks"), @@ -252,7 +252,6 @@ async fn test_make_disk_from_image_too_small( disk_source: params::DiskSource::Image { image_id: alpine_image.identity.id, }, - // Nexus defines YouCanBootAnythingAsLongAsItsAlpine size as 100M size: ByteCount::from(90 * 1024 * 1024), }; @@ -413,7 +412,7 @@ async fn test_image_from_other_project_snapshot_fails( .execute_and_parse_unwrap() .await; // Create a disk from this image - let disk_create_params = params::DiskCreate { + let disk_create_params = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk".parse().unwrap(), description: "meow".into(), diff --git a/nexus/tests/integration_tests/instances.rs 
b/nexus/tests/integration_tests/instances.rs index 228569faa4a..a84b1fcbc98 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -2610,7 +2610,7 @@ async fn test_instance_using_image_from_other_project_fails( params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], disks: vec![params::InstanceDiskAttachment::Create( - params::DiskCreate { + params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "stolen".parse().unwrap(), description: "i stole an image".into(), @@ -4093,7 +4093,7 @@ async fn test_instance_create_attach_disks( network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], boot_disk: Some(params::InstanceDiskAttachment::Create( - params::DiskCreate { + params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: Name::try_from(String::from("created-disk")).unwrap(), description: String::from( @@ -4107,19 +4107,21 @@ async fn test_instance_create_attach_disks( }, )), disks: vec![ - params::InstanceDiskAttachment::Create(params::DiskCreate { - identity: IdentityMetadataCreateParams { - name: Name::try_from(String::from("created-disk2")) - .unwrap(), - description: String::from( - "A data disk that was created by instance create", - ), - }, - size: ByteCount::from_gibibytes_u32(4), - disk_source: params::DiskSource::Blank { - block_size: params::BlockSize::try_from(512).unwrap(), + params::InstanceDiskAttachment::Create( + params::DiskCreate::Crucible { + identity: IdentityMetadataCreateParams { + name: Name::try_from(String::from("created-disk2")) + .unwrap(), + description: String::from( + "A data disk that was created by instance create", + ), + }, + size: ByteCount::from_gibibytes_u32(4), + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, }, - }), + ), params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { name: 
attachable_disk.identity.name.clone(), @@ -4208,16 +4210,19 @@ async fn test_instance_create_attach_disks_undo( network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], disks: vec![ - params::InstanceDiskAttachment::Create(params::DiskCreate { - identity: IdentityMetadataCreateParams { - name: Name::try_from(String::from("probablydata")).unwrap(), - description: String::from("probably data"), - }, - size: ByteCount::from_gibibytes_u32(4), - disk_source: params::DiskSource::Blank { - block_size: params::BlockSize::try_from(512).unwrap(), + params::InstanceDiskAttachment::Create( + params::DiskCreate::Crucible { + identity: IdentityMetadataCreateParams { + name: Name::try_from(String::from("probablydata")) + .unwrap(), + description: String::from("probably data"), + }, + size: ByteCount::from_gibibytes_u32(4), + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, }, - }), + ), params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { name: regular_disk.identity.name }, ), @@ -8000,7 +8005,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { } } -async fn instance_get( +pub async fn instance_get( client: &ClientTestContext, instance_url: &str, ) -> Instance { diff --git a/nexus/tests/integration_tests/local_storage.rs b/nexus/tests/integration_tests/local_storage.rs new file mode 100644 index 00000000000..d8635d21b4d --- /dev/null +++ b/nexus/tests/integration_tests/local_storage.rs @@ -0,0 +1,165 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Tests local storage support + +use dropshot::test_util::ClientTestContext; +use http::StatusCode; +use http::method::Method; +use nexus_test_utils::http_testing::AuthnMode; +use nexus_test_utils::http_testing::NexusRequest; +use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_default_ip_pool; +use nexus_test_utils::resource_helpers::create_project; +use nexus_test_utils_macros::nexus_test; +use nexus_types::external_api::{params, views}; +use omicron_common::api::external; +use omicron_nexus::app::MAX_DISK_SIZE_BYTES; +use omicron_nexus::app::MIN_DISK_SIZE_BYTES; +use std::convert::TryFrom; + +type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + +type DiskTest<'a> = + nexus_test_utils::resource_helpers::DiskTest<'a, omicron_nexus::Server>; + +static PROJECT_NAME: &str = "springfield-squidport"; + +fn get_project_selector() -> String { + format!("project={}", PROJECT_NAME) +} + +fn get_disks_url() -> String { + format!("/v1/disks?{}", get_project_selector()) +} + +pub async fn create_project_and_pool( + client: &ClientTestContext, +) -> views::Project { + create_default_ip_pool(client).await; + create_project(client, PROJECT_NAME).await +} + +// Test the various ways Nexus can reject a local storage disk based on sizes +#[nexus_test] +async fn test_reject_creating_local_storage_disk( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create some disks + DiskTest::new(&cptestctx).await; + + create_project_and_pool(&client).await; + + let disks_url = get_disks_url(); + + // Reject where block size doesn't evenly divide total size (note that all + // local storage disks have a block size of 4096) + let error = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &disks_url) + .body(Some(¶ms::DiskCreate::LocalStorage { + identity: external::IdentityMetadataCreateParams { + name: "bad-disk".parse().unwrap(), + description: String::from("bad 
disk"), + }, + + size: external::ByteCount::try_from( + 2 * MIN_DISK_SIZE_BYTES + 512, + ) + .unwrap(), + })) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap(); + assert_eq!( + error.message, + "unsupported value for \"size and block_size\": total size must be a multiple of block size 4096", + ); + + // Reject disks where the MIN_DISK_SIZE_BYTES doesn't evenly divide + // the size + let error = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &disks_url) + .body(Some(¶ms::DiskCreate::LocalStorage { + identity: external::IdentityMetadataCreateParams { + name: "bad-disk".parse().unwrap(), + description: String::from("bad disk"), + }, + + size: external::ByteCount::try_from( + 2 * MIN_DISK_SIZE_BYTES + 4096, + ) + .unwrap(), + })) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap(); + assert_eq!( + error.message, + "unsupported value for \"size\": total size must be a multiple of 1 GiB", + ); +} + +// Test creating a local storage disk larger than MAX_DISK_SIZE_BYTES +#[nexus_test] +async fn test_create_large_local_storage_disk( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create some giant disks + + let mut disk_test = DiskTest::new(&cptestctx).await; + + for sled_agent in cptestctx.all_sled_agents() { + disk_test + .add_sized_zpool_with_datasets( + sled_agent.sled_agent.id, + 7 * 1024, // 7 TiB + ) + .await; + } + + disk_test.propagate_datasets_to_sleds().await; + + create_project_and_pool(&client).await; + + let disks_url = get_disks_url(); + + // A 5 TiB disk! 
+ let large_disk_size = external::ByteCount::from_gibibytes_u32(5 * 1024); + + assert!(large_disk_size.to_bytes() > MAX_DISK_SIZE_BYTES); + + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &disks_url) + .body(Some(¶ms::DiskCreate::LocalStorage { + identity: external::IdentityMetadataCreateParams { + name: "chonk-disk".parse().unwrap(), + description: String::from("chonk"), + }, + + size: large_disk_size, + })) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap(); +} diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index 01fbcb0bc5d..fa9d0429e31 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -29,6 +29,7 @@ mod instances; mod internet_gateway; mod inventory_matching; mod ip_pools; +mod local_storage; mod metrics; mod metrics_querier; mod multicast; diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs index fc5becdb0e2..a30464e782e 100644 --- a/nexus/tests/integration_tests/pantry.rs +++ b/nexus/tests/integration_tests/pantry.rs @@ -4,6 +4,7 @@ //! 
Tests Nexus' interactions with Crucible's pantry +use crate::integration_tests::instances::instance_simulate; use crate::integration_tests::instances::instance_wait_for_state; use dropshot::test_util::ClientTestContext; use http::StatusCode; @@ -27,10 +28,8 @@ use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceState; use omicron_nexus::Nexus; -use omicron_nexus::TestInterfaces as _; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; -use sled_agent_client::TestInterfaces as _; use std::sync::Arc; use uuid::Uuid; @@ -87,15 +86,6 @@ async fn set_instance_state( .unwrap() } -async fn instance_simulate(nexus: &Arc, id: &InstanceUuid) { - let info = nexus - .active_instance_info(id, None) - .await - .unwrap() - .expect("instance must be on a sled to simulate a state change"); - info.sled_client.vmm_finish_transition(info.propolis_id).await; -} - async fn disk_get(client: &ClientTestContext, disk_url: &str) -> Disk { NexusRequest::object_get(client, disk_url) .authn_as(AuthnMode::PrivilegedUser) @@ -130,7 +120,7 @@ async fn create_disk_with_state_importing_blocks(client: &ClientTestContext) { let _disk: Disk = object_create( client, &url, - ¶ms::DiskCreate { + ¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), @@ -357,7 +347,7 @@ async fn test_disk_create_for_importing(cptestctx: &ControlPlaneTestContext) { create_project_and_pool(client).await; let disks_url = get_disks_url(); - let new_disk = params::DiskCreate { + let new_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: DISK_NAME.parse().unwrap(), description: String::from("sells rainsticks"), diff --git a/nexus/tests/integration_tests/quotas.rs b/nexus/tests/integration_tests/quotas.rs index ee718245961..c28997e452b 100644 --- 
a/nexus/tests/integration_tests/quotas.rs +++ b/nexus/tests/integration_tests/quotas.rs @@ -177,7 +177,7 @@ impl ResourceAllocator { Method::POST, "/v1/disks?project=project", ) - .body(Some(¶ms::DiskCreate { + .body(Some(¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: name.parse().unwrap(), description: "".into(), diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 0c4a94870c0..e258a96e745 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -100,7 +100,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { // Create a disk from this image let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -211,7 +211,7 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) { // Create a disk from this image let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -308,7 +308,7 @@ async fn test_snapshot_stopped_instance(cptestctx: &ControlPlaneTestContext) { // Create a disk from this image let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -398,7 +398,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { // 
Create a blank disk let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -459,7 +459,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { // Create a disk from this snapshot let disk_size = ByteCount::from_gibibytes_u32(2); let snap_disk_name: Name = "snap-disk".parse().unwrap(); - let snap_disk = params::DiskCreate { + let snap_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: snap_disk_name.clone(), description: String::from("snapshot of 'sells rainsticks'"), @@ -601,7 +601,7 @@ async fn test_reject_creating_disk_from_snapshot( // Reject where block size doesn't evenly divide total size let error = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) - .body(Some(¶ms::DiskCreate { + .body(Some(¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "bad-disk".parse().unwrap(), description: String::from("bad disk"), @@ -633,7 +633,7 @@ async fn test_reject_creating_disk_from_snapshot( // Reject where size of snapshot is greater than the disk's let error = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) - .body(Some(¶ms::DiskCreate { + .body(Some(¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "bad-disk".parse().unwrap(), description: String::from("bad disk"), @@ -666,7 +666,7 @@ async fn test_reject_creating_disk_from_snapshot( // the size let error = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) - .body(Some(¶ms::DiskCreate { + .body(Some(¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "bad-disk".parse().unwrap(), description: String::from("bad disk"), @@ -763,7 +763,7 @@ async fn 
test_reject_creating_disk_from_illegal_snapshot( // this anyway. let error = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) - .body(Some(¶ms::DiskCreate { + .body(Some(¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "bad-disk".parse().unwrap(), description: String::from("bad disk"), @@ -852,7 +852,7 @@ async fn test_reject_creating_disk_from_other_project_snapshot( format!("/v1/disks?project={}", second_project.identity.name); let error = NexusRequest::new( RequestBuilder::new(client, Method::POST, &second_disks_url) - .body(Some(¶ms::DiskCreate { + .body(Some(¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "stolen-disk".parse().unwrap(), description: String::from("stolen disk"), @@ -888,7 +888,7 @@ async fn test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) { let disk_size = ByteCount::try_from(gibibytes * 1024 * 1024 * 1024).unwrap(); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -959,7 +959,7 @@ async fn test_snapshot_unwind(cptestctx: &ControlPlaneTestContext) { // Create a disk from this image let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -1252,7 +1252,7 @@ async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { // Create a blank disk let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: 
IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -1488,7 +1488,10 @@ async fn test_region_allocation_for_snapshot( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk_id) .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)) + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); diff --git a/nexus/tests/integration_tests/utilization.rs b/nexus/tests/integration_tests/utilization.rs index 4e583301c6e..580adebd885 100644 --- a/nexus/tests/integration_tests/utilization.rs +++ b/nexus/tests/integration_tests/utilization.rs @@ -140,7 +140,7 @@ async fn test_utilization_view(cptestctx: &ControlPlaneTestContext) { // provision disk NexusRequest::new( RequestBuilder::new(client, Method::POST, &disk_url) - .body(Some(¶ms::DiskCreate { + .body(Some(¶ms::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "test-disk".parse().unwrap(), description: "".into(), diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index ebf168b80c4..c4066bb4ff9 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -144,7 +144,7 @@ async fn create_base_disk( base_disk_name: &Name, ) -> external::Disk { let disk_size = ByteCount::from_gibibytes_u32(2); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -404,7 +404,7 @@ async fn test_snapshot_prevents_other_disk( // means the region wasn't deleted. 
let disk_size = ByteCount::from_gibibytes_u32(10); let next_disk_name: Name = "next-disk".parse().unwrap(); - let next_disk = params::DiskCreate { + let next_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: next_disk_name.clone(), description: String::from("will fail"), @@ -470,7 +470,7 @@ async fn test_multiple_disks_multiple_snapshots_order_1( // Create a blank disk let disk_size = ByteCount::from_gibibytes_u32(2); let first_disk_name: Name = "first-disk".parse().unwrap(); - let first_disk = params::DiskCreate { + let first_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: first_disk_name.clone(), description: String::from("disk 1"), @@ -512,7 +512,7 @@ async fn test_multiple_disks_multiple_snapshots_order_1( // Create another blank disk let second_disk_name: Name = "second-disk".parse().unwrap(); - let second_disk = params::DiskCreate { + let second_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: second_disk_name.clone(), description: String::from("disk 1"), @@ -605,7 +605,7 @@ async fn test_multiple_disks_multiple_snapshots_order_2( // Create a blank disk let disk_size = ByteCount::from_gibibytes_u32(2); let first_disk_name: Name = "first-disk".parse().unwrap(); - let first_disk = params::DiskCreate { + let first_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: first_disk_name.clone(), description: String::from("disk 1"), @@ -647,7 +647,7 @@ async fn test_multiple_disks_multiple_snapshots_order_2( // Create another blank disk let second_disk_name: Name = "second-disk".parse().unwrap(); - let second_disk = params::DiskCreate { + let second_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: second_disk_name.clone(), description: String::from("disk 1"), @@ -735,7 +735,7 @@ async fn prepare_for_test_multiple_layers_of_snapshots( // Create a blank disk let disk_size = 
ByteCount::from_gibibytes_u32(1); let layer_1_disk_name: Name = "layer-1-disk".parse().unwrap(); - let layer_1_disk = params::DiskCreate { + let layer_1_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: layer_1_disk_name.clone(), description: String::from("layer 1"), @@ -777,7 +777,7 @@ async fn prepare_for_test_multiple_layers_of_snapshots( // Create a layer 2 disk out of the layer 1 snapshot let layer_2_disk_name: Name = "layer-2-disk".parse().unwrap(); - let layer_2_disk = params::DiskCreate { + let layer_2_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: layer_2_disk_name.clone(), description: String::from("layer 2"), @@ -819,7 +819,7 @@ async fn prepare_for_test_multiple_layers_of_snapshots( // Create a layer 3 disk out of the layer 2 snapshot let layer_3_disk_name: Name = "layer-3-disk".parse().unwrap(); - let layer_3_disk = params::DiskCreate { + let layer_3_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: layer_3_disk_name.clone(), description: String::from("layer 3"), @@ -1172,7 +1172,7 @@ async fn delete_image_test( let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk_name: Name = "base-disk".parse().unwrap(); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("all your base disk are belong to us"), @@ -2467,7 +2467,7 @@ async fn test_disk_create_saga_unwinds_correctly( .set_region_creation_error(true); let disk_size = ByteCount::from_gibibytes_u32(2); - let base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -2511,7 +2511,7 @@ async fn test_snapshot_create_saga_unwinds_correctly( // Create a disk let disk_size = ByteCount::from_gibibytes_u32(2); - let 
base_disk = params::DiskCreate { + let base_disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: base_disk_name.clone(), description: String::from("sells rainsticks"), @@ -3311,7 +3311,7 @@ async fn test_cte_returns_regions(cptestctx: &ControlPlaneTestContext) { create_project_and_pool(client).await; let disks_url = get_disks_url(); - let disk = params::DiskCreate { + let disk = params::DiskCreate::Crucible { identity: IdentityMetadataCreateParams { name: "disk".parse().unwrap(), description: String::from("disk"), @@ -3345,7 +3345,10 @@ async fn test_cte_returns_regions(cptestctx: &ControlPlaneTestContext) { let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk_id) .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)) + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -3930,7 +3933,10 @@ async fn test_read_only_region_reference_counting( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk.identity.id) .await - .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); + .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)) + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -3984,7 +3990,10 @@ async fn test_read_only_region_reference_counting( "disk_from_snapshot {:?} should exist", disk_from_snapshot.identity.id ) - }); + }) + else { + unreachable!() + }; let read_only_region_address: SocketAddrV6 = nexus.region_addr(&opctx.log, read_only_region.id()).await.unwrap(); @@ -4196,7 +4205,10 @@ async fn test_read_only_region_reference_counting_layers( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk.identity.id) .await - .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); + .unwrap_or_else(|_| panic!("disk {:?} 
should exist", disk.identity.id)) + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -4245,7 +4257,10 @@ async fn test_read_only_region_reference_counting_layers( "disk_from_snapshot {:?} should exist", disk_from_snapshot.identity.id ) - }); + }) + else { + unreachable!() + }; let read_only_region_address: SocketAddrV6 = nexus.region_addr(&opctx.log, read_only_region.id()).await.unwrap(); @@ -4427,7 +4442,10 @@ async fn test_volume_replace_snapshot_respects_accounting( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk.identity.id) .await - .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); + .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)) + else { + unreachable!() + }; let disk_allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -4632,7 +4650,10 @@ async fn test_volume_remove_rop_respects_accounting( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk.identity.id) .await - .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); + .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)) + else { + unreachable!() + }; let disk_allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -4666,7 +4687,10 @@ async fn test_volume_remove_rop_respects_accounting( "disk_from_snapshot {:?} should exist", disk_from_snapshot.identity.id ) - }); + }) + else { + unreachable!() + }; // Assert the correct volume resource usage records before the removal: // both the snapshot volume and disk_from_snapshot volume should have usage @@ -4793,7 +4817,10 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk.identity.id) .await - .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); + .unwrap_or_else(|_| panic!("disk {:?} should exist", 
disk.identity.id)) + else { + unreachable!() + }; let disk_allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -4827,7 +4854,10 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others( "disk_from_snapshot {:?} should exist", disk_from_snapshot.identity.id ) - }); + }) + else { + unreachable!() + }; let another_disk_from_snapshot = create_disk_from_snapshot( &client, @@ -4845,7 +4875,10 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others( "another_disk_from_snapshot {:?} should exist", another_disk_from_snapshot.identity.id ) - }); + }) + else { + unreachable!() + }; // Assert the correct volume resource usage records before the removal: the // snapshot volume, disk_from_snapshot volume, and @@ -5446,7 +5479,10 @@ async fn test_double_layer_with_read_only_region_delete( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk.identity.id) .await - .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); + .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)) + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -5554,7 +5590,10 @@ async fn test_double_layer_snapshot_with_read_only_region_delete_2( let Disk::Crucible(db_disk) = datastore .disk_get(&opctx, disk.identity.id) .await - .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); + .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)) + else { + unreachable!() + }; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 91f2e867383..40b2e75a458 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -1171,7 +1171,7 @@ impl Default for InstanceNetworkInterfaceAttachment { /// Describe the instance's disks at 
creation time #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -#[serde(tag = "type", rename_all = "snake_case")] +#[serde(tag = "type", content = "content", rename_all = "snake_case")] pub enum InstanceDiskAttachment { /// During instance creation, create and attach disks Create(DiskCreate), @@ -1184,7 +1184,7 @@ impl InstanceDiskAttachment { /// Get the name of the disk described by this attachment. pub fn name(&self) -> Name { match self { - Self::Create(create) => create.identity.name.clone(), + Self::Create(create) => create.identity().name.clone(), Self::Attach(InstanceDiskAttach { name }) => name.clone(), } } @@ -1731,7 +1731,7 @@ impl From for PhysicalDiskKind { } } -/// Different sources for a disk +/// Different sources for a Crucible Disk #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] #[serde(tag = "type", rename_all = "snake_case")] pub enum DiskSource { @@ -1751,14 +1751,44 @@ pub enum DiskSource { /// Create-time parameters for a `Disk` #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct DiskCreate { - /// The common identifying metadata for the disk - #[serde(flatten)] - pub identity: IdentityMetadataCreateParams, - /// The initial source for this disk - pub disk_source: DiskSource, - /// The total size of the Disk (in bytes) - pub size: ByteCount, +#[serde(tag = "type", rename_all = "snake_case")] +pub enum DiskCreate { + Crucible { + /// The common identifying metadata for the disk + #[serde(flatten)] + identity: IdentityMetadataCreateParams, + + /// The initial source for this disk + disk_source: DiskSource, + + /// The total size of the Disk (in bytes) + size: ByteCount, + }, + + LocalStorage { + /// The common identifying metadata for the disk + #[serde(flatten)] + identity: IdentityMetadataCreateParams, + + /// The total size of the Disk (in bytes) + size: ByteCount, + }, +} + +impl DiskCreate { + pub fn identity(&self) -> &IdentityMetadataCreateParams { + match &self { + DiskCreate::Crucible { 
identity, .. } => identity, + DiskCreate::LocalStorage { identity, .. } => identity, + } + } + + pub fn size(&self) -> ByteCount { + match &self { + DiskCreate::Crucible { size, .. } => *size, + DiskCreate::LocalStorage { size, .. } => *size, + } + } } // equivalent to crucible_pantry_client::types::ExpectedDigest diff --git a/openapi/nexus.json b/openapi/nexus.json index 621cb8ccaf5..3db0e0234a1 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -18410,36 +18410,80 @@ }, "DiskCreate": { "description": "Create-time parameters for a `Disk`", - "type": "object", - "properties": { - "description": { - "type": "string" - }, - "disk_source": { - "description": "The initial source for this disk", - "allOf": [ - { - "$ref": "#/components/schemas/DiskSource" + "oneOf": [ + { + "description": "Create-time identity-related parameters", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "disk_source": { + "description": "The initial source for this disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskSource" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "size": { + "description": "The total size of the Disk (in bytes)", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "crucible" + ] } + }, + "required": [ + "description", + "disk_source", + "name", + "size", + "type" ] }, - "name": { - "$ref": "#/components/schemas/Name" - }, - "size": { - "description": "The total size of the Disk (in bytes)", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" + { + "description": "Create-time identity-related parameters", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "size": { + "description": "The total size of the Disk (in bytes)", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "type": { + "type": "string", + "enum": [ + 
"local_storage" + ] } + }, + "required": [ + "description", + "name", + "size", + "type" ] } - }, - "required": [ - "description", - "disk_source", - "name", - "size" ] }, "DiskPath": { @@ -18480,7 +18524,7 @@ ] }, "DiskSource": { - "description": "Different sources for a disk", + "description": "Different sources for a Crucible Disk", "oneOf": [ { "description": "Create a blank disk", @@ -18770,7 +18814,8 @@ "DiskType": { "type": "string", "enum": [ - "crucible" + "crucible", + "local_storage" ] }, "Distributiondouble": { @@ -21178,6 +21223,22 @@ "ncpus" ] }, + "InstanceDiskAttach": { + "type": "object", + "properties": { + "name": { + "description": "A disk name to attach", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "name" + ] + }, "InstanceDiskAttachment": { "description": "Describe the instance's disks at creation time", "oneOf": [ @@ -21185,27 +21246,8 @@ "description": "During instance creation, create and attach disks", "type": "object", "properties": { - "description": { - "type": "string" - }, - "disk_source": { - "description": "The initial source for this disk", - "allOf": [ - { - "$ref": "#/components/schemas/DiskSource" - } - ] - }, - "name": { - "$ref": "#/components/schemas/Name" - }, - "size": { - "description": "The total size of the Disk (in bytes)", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] + "content": { + "$ref": "#/components/schemas/DiskCreate" }, "type": { "type": "string", @@ -21215,10 +21257,7 @@ } }, "required": [ - "description", - "disk_source", - "name", - "size", + "content", "type" ] }, @@ -21226,13 +21265,8 @@ "description": "During instance creation, attach this disk", "type": "object", "properties": { - "name": { - "description": "A disk name to attach", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] + "content": { + "$ref": "#/components/schemas/InstanceDiskAttach" }, "type": { "type": "string", @@ -21242,7 +21276,7 @@ } }, "required": [ - 
"name", + "content", "type" ] } diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 9ce28021a5f..37133484e0c 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1476,7 +1476,8 @@ CREATE TYPE IF NOT EXISTS omicron.public.block_size AS ENUM ( ); CREATE TYPE IF NOT EXISTS omicron.public.disk_type AS ENUM ( - 'crucible' + 'crucible', + 'local_storage' ); CREATE TABLE IF NOT EXISTS omicron.public.disk ( @@ -7369,6 +7370,34 @@ CREATE INDEX IF NOT EXISTS multicast_member_parent_state ON omicron.public.multi state ) WHERE time_deleted IS NULL; +CREATE TABLE IF NOT EXISTS omicron.public.disk_type_local_storage ( + disk_id UUID PRIMARY KEY, + + required_dataset_overhead INT8 NOT NULL, + + local_storage_dataset_allocation_id UUID +); + +CREATE TABLE IF NOT EXISTS omicron.public.local_storage_dataset_allocation ( + id UUID PRIMARY KEY, + + time_created TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + + local_storage_dataset_id UUID NOT NULL, + pool_id UUID NOT NULL, + sled_id UUID NOT NULL, + + dataset_size INT8 NOT NULL +); + +CREATE INDEX IF NOT EXISTS + lookup_local_storage_dataset_allocation_by_dataset +ON + omicron.public.local_storage_dataset_allocation (local_storage_dataset_id) +WHERE + time_deleted IS NULL; + -- Keep this at the end of file so that the database does not contain a version -- until it is fully populated. 
INSERT INTO omicron.public.db_metadata ( @@ -7378,7 +7407,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '209.0.0', NULL) + (TRUE, NOW(), NOW(), '210.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/local-storage-disk-type/up01.sql b/schema/crdb/local-storage-disk-type/up01.sql new file mode 100644 index 00000000000..57fede72472 --- /dev/null +++ b/schema/crdb/local-storage-disk-type/up01.sql @@ -0,0 +1,6 @@ +ALTER TYPE + omicron.public.disk_type +ADD VALUE IF NOT EXISTS + 'local_storage' +AFTER + 'crucible' diff --git a/schema/crdb/local-storage-disk-type/up02.sql b/schema/crdb/local-storage-disk-type/up02.sql new file mode 100644 index 00000000000..a1d34705771 --- /dev/null +++ b/schema/crdb/local-storage-disk-type/up02.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS omicron.public.disk_type_local_storage ( + disk_id UUID PRIMARY KEY, + + required_dataset_overhead INT8 NOT NULL, + + local_storage_dataset_allocation_id UUID +); diff --git a/schema/crdb/local-storage-disk-type/up03.sql b/schema/crdb/local-storage-disk-type/up03.sql new file mode 100644 index 00000000000..5953b1d0d4e --- /dev/null +++ b/schema/crdb/local-storage-disk-type/up03.sql @@ -0,0 +1,12 @@ +CREATE TABLE IF NOT EXISTS omicron.public.local_storage_dataset_allocation ( + id UUID PRIMARY KEY, + + time_created TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + + local_storage_dataset_id UUID NOT NULL, + pool_id UUID NOT NULL, + sled_id UUID NOT NULL, + + dataset_size INT8 NOT NULL +); From e141a46fda05ae0bd5c5840f84bf435fe2a03801 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Mon, 17 Nov 2025 21:56:10 +0000 Subject: [PATCH 02/11] print generic disk information for local storage disks --- dev-tools/omdb/src/bin/omdb/db.rs | 96 ++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 3 deletions(-) diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 64ae3d4d5c9..fdf7d05ea47 
100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -2218,8 +2218,7 @@ async fn crucible_disk_info( } } } else { - // If the disk is not attached to anything, just print empty - // fields. + // If the disk is not attached to anything, just print empty fields. UpstairsRow { host_serial: "-".to_string(), disk_name, @@ -2280,8 +2279,97 @@ async fn crucible_disk_info( } async fn local_storage_disk_info( + opctx: &OpContext, + datastore: &DataStore, disk: LocalStorageDisk, ) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct GenericRow { + host_serial: String, + disk_name: String, + instance_name: String, + propolis_zone: String, + disk_state: String, + } + + let conn = datastore.pool_connection_for_tests().await?; + + let disk_name = disk.name().to_string(); + let disk_state = disk.runtime().disk_state.to_string(); + + let row = if let Some(instance_uuid) = disk.runtime().attach_instance_id { + // Get the instance this disk is attached to + use nexus_db_schema::schema::instance::dsl as instance_dsl; + use nexus_db_schema::schema::vmm::dsl as vmm_dsl; + let instances: Vec = instance_dsl::instance + .filter(instance_dsl::id.eq(instance_uuid)) + .left_join( + vmm_dsl::vmm.on(vmm_dsl::id + .nullable() + .eq(instance_dsl::active_propolis_id) + .and(vmm_dsl::time_deleted.is_null())), + ) + .limit(1) + .select((Instance::as_select(), Option::::as_select())) + .load_async(&*conn) + .await + .context("loading requested instance")? 
+ .into_iter() + .map(|i: (Instance, Option)| i.into()) + .collect(); + + let Some(instance) = instances.into_iter().next() else { + bail!("no instance: {} found", instance_uuid); + }; + + let instance_name = instance.instance().name().to_string(); + + if instance.vmm().is_some() { + let propolis_id = + instance.instance().runtime().propolis_id.unwrap(); + let my_sled_id = instance.sled_id().unwrap(); + + let (_, my_sled) = LookupPath::new(opctx, datastore) + .sled_id(my_sled_id) + .fetch() + .await + .context("failed to look up sled")?; + + GenericRow { + host_serial: my_sled.serial_number().to_string(), + disk_name, + instance_name, + propolis_zone: format!("oxz_propolis-server_{}", propolis_id), + disk_state, + } + } else { + GenericRow { + host_serial: NOT_ON_SLED_MSG.to_string(), + disk_name, + instance_name, + propolis_zone: NO_ACTIVE_PROPOLIS_MSG.to_string(), + disk_state, + } + } + } else { + // If the disk is not attached to anything, just print empty fields. + GenericRow { + host_serial: "-".to_string(), + disk_name, + instance_name: "-".to_string(), + propolis_zone: "-".to_string(), + disk_state, + } + }; + + let table = tabled::Table::new(vec![row]) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct Row { @@ -2335,7 +2423,9 @@ async fn cmd_db_disk_info( Disk::Crucible(disk) => { crucible_disk_info(opctx, datastore, disk).await } - Disk::LocalStorage(disk) => local_storage_disk_info(disk).await, + Disk::LocalStorage(disk) => { + local_storage_disk_info(opctx, datastore, disk).await + } } } From f7c8bd75c3e00780b2385fec6a1b6aa7eb7b3d10 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Mon, 17 Nov 2025 22:04:58 +0000 Subject: [PATCH 03/11] missing index --- schema/crdb/local-storage-disk-type/up04.sql | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 
schema/crdb/local-storage-disk-type/up04.sql diff --git a/schema/crdb/local-storage-disk-type/up04.sql b/schema/crdb/local-storage-disk-type/up04.sql new file mode 100644 index 00000000000..2d34083d522 --- /dev/null +++ b/schema/crdb/local-storage-disk-type/up04.sql @@ -0,0 +1,6 @@ +CREATE INDEX IF NOT EXISTS + lookup_local_storage_dataset_allocation_by_dataset +ON + omicron.public.local_storage_dataset_allocation (local_storage_dataset_id) +WHERE + time_deleted IS NULL; From 135f6a91b7e15255b7820e2b19c0664dad2a652c Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Thu, 20 Nov 2025 22:36:53 +0000 Subject: [PATCH 04/11] validate DiskTypeLocalStorage can be created before saga --- nexus/src/app/disk.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 2608c9aaf58..9df6d25fa55 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -8,6 +8,7 @@ use crate::app::sagas; use crate::external_api::params; use nexus_db_lookup::LookupPath; use nexus_db_lookup::lookup; +use nexus_db_model::DiskTypeLocalStorage; use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; @@ -226,7 +227,7 @@ impl super::Nexus { } } - params::DiskCreate::LocalStorage { .. } => { + params::DiskCreate::LocalStorage { size, .. } => { // If a user requests some outlandish number of TB for local // storage, and there isn't a sled allocation that can fulfill // this, instance create will work but instance start (which @@ -253,6 +254,22 @@ impl super::Nexus { // TODO consult the latest inventory collection if it isn't too // expensive, and reject local storage disk requests that are // too large to be served by any sled. + + // Test that the disk create saga can create the + // DiskTypeLocalStorage record. 
This won't be the record + // actually used by the created disk, but validation here can be + // returned to a user with a non-500 error, and validation + // failure in a saga will only show up as a 500. + + if let Err(e) = DiskTypeLocalStorage::new(Uuid::new_v4(), *size) + { + return Err(Error::invalid_value( + "size", + format!( + "error computing required dataset overhead: {e}" + ), + )); + } } } From 950ba7343c1b84de431d887645c318b6eb572b8d Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Fri, 21 Nov 2025 22:51:12 +0000 Subject: [PATCH 05/11] internal_context and try not wrapping in Some --- nexus/db-queries/src/db/datastore/disk.rs | 67 +++++++++++++---------- 1 file changed, 38 insertions(+), 29 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index fcd43ee5f06..f014aafff33 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -320,6 +320,9 @@ impl DataStore { .await .map_err(|e| { public_error_from_diesel(e, ErrorHandler::Server) + .internal_context(format!( + "{disk_id} missing disk_type_crucible record" + )) })?; Disk::Crucible(CrucibleDisk { disk, disk_type_crucible }) @@ -335,6 +338,10 @@ impl DataStore { .await .map_err(|e| { public_error_from_diesel(e, ErrorHandler::Server) + .internal_context(format!( + "{disk_id} missing disk_type_local_storage \ + record" + )) })?; let local_storage_dataset_allocation = if let Some( @@ -345,19 +352,20 @@ impl DataStore { { use nexus_db_schema::schema::local_storage_dataset_allocation::dsl; - Some( - dsl::local_storage_dataset_allocation - .filter(dsl::id.eq(to_db_typed_uuid(allocation_id))) - .select(LocalStorageDatasetAllocation::as_select()) - .first_async(&*conn) - .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::Server, - ) - })?, - ) + let allocation = dsl::local_storage_dataset_allocation + .filter(dsl::id.eq(to_db_typed_uuid(allocation_id))) + 
.select(LocalStorageDatasetAllocation::as_select()) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context(format!( + "local storage disk {disk_id} missing \ + allocation {allocation_id}" + )) + })?; + + Some(allocation) } else { None }; @@ -483,23 +491,24 @@ impl DataStore { { use nexus_db_schema::schema::local_storage_dataset_allocation::dsl; - Some( - dsl::local_storage_dataset_allocation - .filter( - dsl::id.eq(to_db_typed_uuid(allocation_id)), - ) - .select( - LocalStorageDatasetAllocation::as_select(), + let allocation = dsl::local_storage_dataset_allocation + .filter(dsl::id.eq(to_db_typed_uuid(allocation_id))) + .select(LocalStorageDatasetAllocation::as_select()) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::Server, ) - .first_async(&*conn) - .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::Server, - ) - })?, - ) + .internal_context(format!( + "local storage disk {} missing \ + allocation {allocation_id}", + disk.id(), + )) + })?; + + Some(allocation) } else { None }; From e970dfe8effd9cf97d1b29605c022cff5177a4e7 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Fri, 21 Nov 2025 22:55:22 +0000 Subject: [PATCH 06/11] more descriptive error, also owned string --- nexus/db-queries/src/db/datastore/disk.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index f014aafff33..864b509458d 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -595,10 +595,15 @@ impl DataStore { if local_storage_dataset_allocation.is_some() { // This allocation is currently only performed during // instance allocation, return an error here. 
- return Err(err.bail(Error::internal_error(&format!( - "local storage disk {} has an allocation!", - disk.id(), - )))); + return Err(err.bail(Error::InternalError { + internal_message: format!( + "local storage dataset allocation is only \ + performed during instance allocation, but {} is \ + being created with an allocation when it should \ + be None", + disk.id() + ), + })); } use nexus_db_schema::schema::disk_type_local_storage::dsl; From 0fe00c8451c7b1821c9470d3dfcb7ddff0eee88b Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Fri, 21 Nov 2025 22:58:59 +0000 Subject: [PATCH 07/11] Error::internal_error rampage --- nexus/src/app/disk.rs | 74 ++++++++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 29 deletions(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 9df6d25fa55..0568e7632a6 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -300,7 +300,9 @@ impl super::Nexus { let disk_created = saga_outputs .lookup_node_output::<db::datastore::Disk>("created_disk") - .map_err(|e| Error::internal_error(&format!("{:#}", &e))) + .map_err(|e| Error::InteralError { + internal_message: format!("{:#}", &e), + }) .internal_context("looking up output from disk create saga")?; Ok(disk_created) @@ -423,10 +425,12 @@ impl super::Nexus { } datastore::Disk::LocalStorage(_) => { - return Err(Error::internal_error(&format!( - "cannot remove rop for local storage disk {}", - disk.id() - ))); + return Err(Error::InternalError { + internal_message: format!( + "cannot remove rop for local storage disk {}", + disk.id() + ), + }); } } @@ -452,10 +456,12 @@ impl super::Nexus { } db::model::DiskType::LocalStorage => { - return Err(Error::internal_error(&format!( - "cannot import to local storage disk {}", - authz_disk.id() - ))); + return Err(Error::InternalError { + internal_message: format!( + "cannot import to local storage disk {}", + authz_disk.id() + ), + }); } } @@ -500,10 +506,12 @@ impl super::Nexus { db::datastore::Disk::Crucible(disk) => disk,
db::datastore::Disk::LocalStorage(_) => { - return Err(Error::internal_error(&format!( - "cannot import to local storage disk {}", - authz_disk.id() - ))); + return Err(Error::InternalError { + internal_message: format!( + "cannot import to local storage disk {}", + authz_disk.id() + ), + }); } }; @@ -599,20 +607,24 @@ impl super::Nexus { } } - _ => Error::internal_error(&format!( - "error sending bulk write to pantry: {}", - e, - )), + _ => Error::InternalError { + internal_message: format!( + "error sending bulk write to pantry: {}", + e, + ), + }, }, )?; Ok(()) } else { error!(self.log, "disk {} has no pantry address!", disk.id()); - Err(Error::internal_error(&format!( - "disk {} has no pantry address!", - disk.id(), - ))) + Err(Error::InternalError { + internal_message: format!( + "disk {} has no pantry address!", + disk.id(), + ), + }) } } @@ -636,10 +648,12 @@ impl super::Nexus { } db::model::DiskType::LocalStorage => { - return Err(Error::internal_error(&format!( - "cannot import to local storage disk {}", - authz_disk.id() - ))); + return Err(Error::InternalError { + internal_message: format!( + "cannot import to local storage disk {}", + authz_disk.id() + ), + }); } } @@ -684,10 +698,12 @@ impl super::Nexus { datastore::Disk::Crucible(disk) => disk, datastore::Disk::LocalStorage(_) => { - return Err(Error::internal_error(&format!( - "cannot finalize local storage disk {}", - authz_disk.id() - ))); + return Err(Error::InteralError { + internal_message: format!( + "cannot finalize local storage disk {}", + authz_disk.id() + ), + }); } }; From 658be0b8e73a705174c32ab2d2efd823bf2ce379 Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Fri, 21 Nov 2025 23:00:09 +0000 Subject: [PATCH 08/11] what is it --- nexus/src/app/instance.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 1922e7eabf1..7ce1ff58267 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -824,7 
+824,7 @@ impl super::Nexus { // Cannot migrate instance if it has local storage if self.instance_uses_local_storage(opctx, &authz_instance).await? { return Err(Error::invalid_request(format!( - "cannot migrate {} as it uses local storage", + "cannot migrate instance {} as it uses local storage", authz_instance.id() ))); } From 355efd81f33f581d98c33df4ddf48e5ca897915e Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Fri, 21 Nov 2025 23:01:19 +0000 Subject: [PATCH 09/11] turbo --- nexus/tests/integration_tests/local_storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/tests/integration_tests/local_storage.rs b/nexus/tests/integration_tests/local_storage.rs index d8635d21b4d..2318ced5f13 100644 --- a/nexus/tests/integration_tests/local_storage.rs +++ b/nexus/tests/integration_tests/local_storage.rs @@ -28,7 +28,7 @@ type DiskTest<'a> = static PROJECT_NAME: &str = "springfield-squidport"; fn get_project_selector() -> String { - format!("project={}", PROJECT_NAME) + format!("project={PROJECT_NAME}") } fn get_disks_url() -> String { From f3259d58dbd05575755351620b3a17269eedd86a Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Fri, 21 Nov 2025 23:02:04 +0000 Subject: [PATCH 10/11] wrap to 80 --- nexus/tests/integration_tests/local_storage.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nexus/tests/integration_tests/local_storage.rs b/nexus/tests/integration_tests/local_storage.rs index 2318ced5f13..434a4bc0335 100644 --- a/nexus/tests/integration_tests/local_storage.rs +++ b/nexus/tests/integration_tests/local_storage.rs @@ -81,7 +81,8 @@ async fn test_reject_creating_local_storage_disk( .unwrap(); assert_eq!( error.message, - "unsupported value for \"size and block_size\": total size must be a multiple of block size 4096", + "unsupported value for \"size and block_size\": total size must be a \ + multiple of block size 4096", ); // Reject disks where the MIN_DISK_SIZE_BYTES doesn't evenly divide @@ -109,7 
+110,8 @@ async fn test_reject_creating_local_storage_disk( .unwrap(); assert_eq!( error.message, - "unsupported value for \"size\": total size must be a multiple of 1 GiB", + "unsupported value for \"size\": total size must be a multiple of 1 \ + GiB", ); } From b67ddc4d89b93f6d660fdcaf52e8b6f9b700540b Mon Sep 17 00:00:00 2001 From: James MacMahon Date: Sat, 22 Nov 2025 01:36:26 +0000 Subject: [PATCH 11/11] fmt and clippy --- nexus/src/app/disk.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 0568e7632a6..f133e5e1035 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -300,8 +300,8 @@ impl super::Nexus { let disk_created = saga_outputs .lookup_node_output::<db::datastore::Disk>("created_disk") - .map_err(|e| Error::InteralError { - internal_message: format!("{:#}", &e), + .map_err(|e| Error::InternalError { + internal_message: format!("{e:#}"), }) .internal_context("looking up output from disk create saga")?; @@ -609,8 +609,7 @@ impl super::Nexus { _ => Error::InternalError { internal_message: format!( - "error sending bulk write to pantry: {}", - e, + "error sending bulk write to pantry: {e}" ), }, }, @@ -698,7 +697,7 @@ impl super::Nexus { datastore::Disk::Crucible(disk) => disk, datastore::Disk::LocalStorage(_) => { - return Err(Error::InteralError { + return Err(Error::InternalError { internal_message: format!( "cannot finalize local storage disk {}", authz_disk.id()