diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs
index 92da7a208d2..f2ae4cd3bc6 100644
--- a/illumos-utils/src/zfs.rs
+++ b/illumos-utils/src/zfs.rs
@@ -139,6 +139,9 @@ enum EnsureDatasetErrorRaw {
     #[error("Unexpected output from ZFS commands: {0}")]
     Output(String),
 
+    #[error("Dataset does not exist")]
+    DoesNotExist,
+
     #[error("Failed to mount filesystem")]
     MountFsFailed(#[source] crate::ExecutionError),
 
@@ -220,18 +223,11 @@ pub struct Zfs {}
 
 /// Describes a mountpoint for a ZFS filesystem.
 #[derive(Debug, Clone)]
-pub enum Mountpoint {
-    #[allow(dead_code)]
-    Legacy,
-    Path(Utf8PathBuf),
-}
+pub struct Mountpoint(pub Utf8PathBuf);
 
 impl fmt::Display for Mountpoint {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            Mountpoint::Legacy => write!(f, "legacy"),
-            Mountpoint::Path(p) => write!(f, "{p}"),
-        }
+        write!(f, "{}", self.0)
     }
 }
 
@@ -507,12 +503,22 @@ fn build_zfs_set_key_value_pairs(
 }
 
 /// Describes the ZFS "canmount" options.
+#[derive(Copy, Clone, Debug)]
 pub enum CanMount {
     On,
     Off,
     NoAuto,
 }
 
+impl CanMount {
+    fn wants_mounting(&self) -> bool {
+        match self {
+            CanMount::On => true,
+            CanMount::Off | CanMount::NoAuto => false,
+        }
+    }
+}
+
 /// Arguments to [Zfs::ensure_dataset].
 pub struct DatasetEnsureArgs<'a> {
     /// The full path of the ZFS dataset.
@@ -796,6 +802,21 @@ fn is_directory_immutable(
     return Ok(result);
 }
 
+struct DatasetMountInfo {
+    exists: bool,
+    mounted: bool,
+}
+
+impl DatasetMountInfo {
+    fn exists(mounted: bool) -> Self {
+        Self { exists: true, mounted }
+    }
+
+    fn does_not_exist() -> Self {
+        Self { exists: false, mounted: false }
+    }
+}
+
 impl Zfs {
     /// Lists all datasets within a pool or existing dataset.
     ///
@@ -900,6 +921,50 @@ impl Zfs {
         Ok(())
     }
 
+    /// Ensures that a ZFS dataset is mounted.
+    ///
+    /// Returns an error if the dataset does not exist, or exists but cannot be mounted.
+    pub fn ensure_dataset_mounted_and_exists(
+        name: &str,
+        mountpoint: &Mountpoint,
+    ) -> Result<(), EnsureDatasetError> {
+        Self::ensure_dataset_mounted_and_exists_inner(name, mountpoint)
+            .map_err(|err| EnsureDatasetError {
+                name: name.to_string(),
+                err,
+            })?;
+        Ok(())
+    }
+
+    fn ensure_dataset_mounted_and_exists_inner(
+        name: &str,
+        mountpoint: &Mountpoint,
+    ) -> Result<(), EnsureDatasetErrorRaw> {
+        let mount_info = Self::dataset_exists(name, mountpoint)?;
+        if !mount_info.exists {
+            return Err(EnsureDatasetErrorRaw::DoesNotExist);
+        }
+
+        if !mount_info.mounted {
+            Self::ensure_dataset_mounted(name, mountpoint)?;
+        }
+        return Ok(());
+    }
+
+    fn ensure_dataset_mounted(
+        name: &str,
+        mountpoint: &Mountpoint,
+    ) -> Result<(), EnsureDatasetErrorRaw> {
+        ensure_empty_immutable_mountpoint(&mountpoint.0).map_err(|err| {
+            EnsureDatasetErrorRaw::MountpointCreation {
+                mountpoint: mountpoint.0.to_path_buf(),
+                err,
+            }
+        })?;
+        Self::mount_dataset(name)?;
+        Ok(())
+    }
+
     /// Creates a new ZFS dataset unless one already exists.
     ///
     /// Refer to [DatasetEnsureArgs] for details on the supplied arguments.
@@ -923,53 +988,43 @@ impl Zfs {
             additional_options,
         }: DatasetEnsureArgs,
     ) -> Result<(), EnsureDatasetErrorRaw> {
-        let (exists, mounted) = Self::dataset_exists(name, &mountpoint)?;
+        let dataset_info = Self::dataset_exists(name, &mountpoint)?;
+
+        // Non-zoned datasets with an explicit mountpoint and the
+        // "canmount=on" property should be mounted within the global zone.
+        //
+        // Zoned datasets are mounted when their zones are booted, so
+        // we don't do this mountpoint manipulation for them.
+        let wants_mounting =
+            !zoned && !dataset_info.mounted && can_mount.wants_mounting();
 
         let props = build_zfs_set_key_value_pairs(size_details, id);
-        if exists {
+
+        if dataset_info.exists {
             // If the dataset already exists: Update properties which might
             // have changed, and ensure it has been mounted if it needs
             // to be mounted.
             Self::set_values(name, props.as_slice())
                 .map_err(|err| EnsureDatasetErrorRaw::from(err.err))?;
 
-            if !zoned && !mounted {
-                if let (CanMount::On, Mountpoint::Path(path)) =
-                    (&can_mount, &mountpoint)
-                {
-                    ensure_empty_immutable_mountpoint(&path).map_err(
-                        |err| EnsureDatasetErrorRaw::MountpointCreation {
-                            mountpoint: path.to_path_buf(),
-                            err,
-                        },
-                    )?;
-                    Self::mount_dataset(name)?;
-                }
+            if wants_mounting {
+                Self::ensure_dataset_mounted(name, &mountpoint)?;
             }
+
             return Ok(());
         }
 
         // If the dataset doesn't exist, create it.
         //
-        // Non-zoned datasets with an explicit mountpoint and the
-        // "canmount=on" property should be mounted within the global zone.
-        //
         // We'll ensure they have an empty immutable mountpoint before
        // creating the dataset itself, which will also mount it.
-        //
-        // Zoned datasets are mounted when their zones are booted, so
-        // we don't do this mountpoint manipulation for them.
-        if !zoned {
-            if let (CanMount::On, Mountpoint::Path(path)) =
-                (&can_mount, &mountpoint)
-            {
-                ensure_empty_immutable_mountpoint(&path).map_err(|err| {
-                    EnsureDatasetErrorRaw::MountpointCreation {
-                        mountpoint: path.to_path_buf(),
-                        err,
-                    }
-                })?;
-            }
+        if wants_mounting {
+            let path = &mountpoint.0;
+            ensure_empty_immutable_mountpoint(&path).map_err(|err| {
+                EnsureDatasetErrorRaw::MountpointCreation {
+                    mountpoint: path.to_path_buf(),
+                    err,
+                }
+            })?;
         }
 
         let mut command = std::process::Command::new(PFEXEC);
@@ -1048,7 +1103,7 @@ impl Zfs {
     fn dataset_exists(
         name: &str,
         mountpoint: &Mountpoint,
-    ) -> Result<(bool, bool), EnsureDatasetErrorRaw> {
+    ) -> Result<DatasetMountInfo, EnsureDatasetErrorRaw> {
        let mut command = std::process::Command::new(ZFS);
        let cmd = command.args(&[
            "list",
@@ -1064,9 +1119,9 @@ impl Zfs {
                 return Err(EnsureDatasetErrorRaw::Output(stdout.to_string()));
             }
             let mounted = values[3] == "yes";
-            Ok((true, mounted))
+            Ok(DatasetMountInfo::exists(mounted))
         } else {
-            Ok((false, false))
+            Ok(DatasetMountInfo::does_not_exist())
         }
     }
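
[review note] A self-contained sketch of the mount policy this file now centralizes. `CanMount` is copied from the diff; the free function below is a stand-in for the private `CanMount::wants_mounting` helper combined with the `zoned`/`mounted` checks in `ensure_dataset`, not the actual API:

```rust
// Sketch only: mirrors the decision logic in the hunks above.
#[derive(Copy, Clone, Debug)]
enum CanMount {
    On,
    Off,
    NoAuto,
}

// A dataset is mounted by the sled agent only when it is not zoned
// (zoned datasets are mounted when their zones boot), is not already
// mounted, and carries the canmount=on property.
fn wants_mounting(zoned: bool, mounted: bool, can_mount: CanMount) -> bool {
    !zoned && !mounted && matches!(can_mount, CanMount::On)
}

fn main() {
    assert!(wants_mounting(false, false, CanMount::On));
    assert!(!wants_mounting(false, true, CanMount::On)); // already mounted
    assert!(!wants_mounting(true, false, CanMount::On)); // zoned
    assert!(!wants_mounting(false, false, CanMount::Off));
    assert!(!wants_mounting(false, false, CanMount::NoAuto));
}
```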
diff --git a/sled-agent/src/backing_fs.rs b/sled-agent/src/backing_fs.rs
index 7c227e4c5ab..b6c5e5ce6bd 100644
--- a/sled-agent/src/backing_fs.rs
+++ b/sled-agent/src/backing_fs.rs
@@ -134,7 +134,7 @@ pub(crate) fn ensure_backing_fs(
         sled_storage::dataset::M2_BACKING_DATASET,
         bfs.name
     );
-    let mountpoint = Mountpoint::Path(Utf8PathBuf::from(bfs.mountpoint));
+    let mountpoint = Mountpoint(Utf8PathBuf::from(bfs.mountpoint));
 
     info!(log, "Ensuring dataset {}", dataset);
 
diff --git a/sled-agent/src/bootstrap/pre_server.rs b/sled-agent/src/bootstrap/pre_server.rs
index 41f5993298a..35a8c1f3b86 100644
--- a/sled-agent/src/bootstrap/pre_server.rs
+++ b/sled-agent/src/bootstrap/pre_server.rs
@@ -275,7 +275,7 @@ fn ensure_zfs_key_directory_exists(log: &Logger) -> Result<(), StartError> {
 fn ensure_zfs_ramdisk_dataset() -> Result<(), StartError> {
     Zfs::ensure_dataset(zfs::DatasetEnsureArgs {
         name: zfs::ZONE_ZFS_RAMDISK_DATASET,
-        mountpoint: zfs::Mountpoint::Path(Utf8PathBuf::from(
+        mountpoint: zfs::Mountpoint(Utf8PathBuf::from(
             zfs::ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT,
         )),
         can_mount: zfs::CanMount::On,
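
[review note] Call sites change mechanically from `Mountpoint::Path(p)` to `Mountpoint(p)`. A hedged sketch of the new newtype and its `Display` behavior, assuming the `camino` crate already used by this code; since the `Legacy` variant was `#[allow(dead_code)]`, no behavior is lost:

```rust
use camino::Utf8PathBuf;
use std::fmt;

// Copied shape of the newtype introduced in this diff.
struct Mountpoint(pub Utf8PathBuf);

impl fmt::Display for Mountpoint {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

fn main() {
    // Before this change: Mountpoint::Path(Utf8PathBuf::from("/data"))
    let mp = Mountpoint(Utf8PathBuf::from("/data"));
    assert_eq!(mp.to_string(), "/data");
}
```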
diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs
index c669684814d..a8b1f26384b 100644
--- a/sled-agent/src/sim/storage.rs
+++ b/sled-agent/src/sim/storage.rs
@@ -22,6 +22,8 @@ use crucible_agent_client::types::{
 };
 use dropshot::HandlerTaskMode;
 use dropshot::HttpError;
+use illumos_utils::zfs::DatasetProperties;
+use omicron_common::api::external::ByteCount;
 use omicron_common::disk::DatasetManagementStatus;
 use omicron_common::disk::DatasetName;
 use omicron_common::disk::DatasetsConfig;
@@ -1362,6 +1364,54 @@ impl StorageInner {
         Ok(config.clone())
     }
 
+    pub fn dataset_get(
+        &self,
+        dataset_name: &String,
+    ) -> Result<DatasetProperties, HttpError> {
+        let Some(config) = self.dataset_config.as_ref() else {
+            return Err(HttpError::for_not_found(
+                None,
+                "No control plane datasets".into(),
+            ));
+        };
+
+        for (id, dataset) in config.datasets.iter() {
+            if dataset.name.full_name().as_str() == dataset_name {
+                return Ok(DatasetProperties {
+                    id: Some(*id),
+                    name: dataset_name.to_string(),
+                    mounted: true,
+                    avail: ByteCount::from_kibibytes_u32(1024),
+                    used: ByteCount::from_kibibytes_u32(1024),
+                    quota: dataset.inner.quota,
+                    reservation: dataset.inner.reservation,
+                    compression: dataset.inner.compression.to_string(),
+                });
+            }
+        }
+
+        for (nested_dataset_name, nested_dataset_storage) in
+            self.nested_datasets.iter()
+        {
+            if nested_dataset_name.full_name().as_str() == dataset_name {
+                let config = &nested_dataset_storage.config.inner;
+
+                return Ok(DatasetProperties {
+                    id: None,
+                    name: dataset_name.to_string(),
+                    mounted: true,
+                    avail: ByteCount::from_kibibytes_u32(1024),
+                    used: ByteCount::from_kibibytes_u32(1024),
+                    quota: config.quota,
+                    reservation: config.reservation,
+                    compression: config.compression.to_string(),
+                });
+            }
+        }
+
+        return Err(HttpError::for_not_found(None, "Dataset not found".into()));
+    }
+
     pub fn datasets_ensure(
         &mut self,
         config: DatasetsConfig,
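
[review note] The simulated `dataset_get` always reports `mounted: true` and fixed placeholder space figures rather than real ZFS accounting. A small sketch of what those placeholders amount to, assuming `ByteCount`'s `to_bytes` accessor from omicron-common:

```rust
use omicron_common::api::external::ByteCount;

fn main() {
    // The simulator hard-codes avail and used to 1024 KiB apiece.
    let placeholder = ByteCount::from_kibibytes_u32(1024);
    assert_eq!(placeholder.to_bytes(), 1024 * 1024); // exactly 1 MiB
}
```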
diff --git a/sled-agent/src/support_bundle/storage.rs b/sled-agent/src/support_bundle/storage.rs
index 4060bbcaeaf..4040823fda6 100644
--- a/sled-agent/src/support_bundle/storage.rs
+++ b/sled-agent/src/support_bundle/storage.rs
@@ -7,13 +7,16 @@
 use async_trait::async_trait;
 use bytes::Bytes;
 use camino::Utf8Path;
+use camino::Utf8PathBuf;
 use dropshot::Body;
 use dropshot::HttpError;
 use futures::Stream;
 use futures::StreamExt;
+use illumos_utils::zfs::DatasetProperties;
 use omicron_common::api::external::Error as ExternalError;
 use omicron_common::disk::CompressionAlgorithm;
 use omicron_common::disk::DatasetConfig;
+use omicron_common::disk::DatasetName;
 use omicron_common::disk::DatasetsConfig;
 use omicron_common::disk::SharedDatasetConfig;
 use omicron_uuid_kinds::DatasetUuid;
@@ -64,6 +67,12 @@ pub enum Error {
     #[error("Dataset not found")]
     DatasetNotFound,
 
+    #[error("Could not look up dataset")]
+    DatasetLookup(#[source] anyhow::Error),
+
+    #[error("Cannot access dataset {dataset:?} which is not mounted")]
+    DatasetNotMounted { dataset: DatasetName },
+
     #[error(
         "Dataset exists, but has an invalid configuration: (wanted {wanted}, saw {actual})"
     )]
@@ -133,6 +142,19 @@ pub trait LocalStorage: Sync {
     /// Returns all configured datasets
     async fn dyn_datasets_config_list(&self) -> Result<DatasetsConfig, Error>;
 
+    /// Returns properties about a dataset
+    fn dyn_dataset_get(
+        &self,
+        dataset_name: &String,
+    ) -> Result<DatasetProperties, Error>;
+
+    /// Ensure a dataset is mounted
+    async fn dyn_ensure_mounted_and_get_mountpoint(
+        &self,
+        dataset: NestedDatasetLocation,
+        mount_root: &Utf8Path,
+    ) -> Result<Utf8PathBuf, Error>;
+
     /// Returns all nested datasets within an existing dataset
     async fn dyn_nested_dataset_list(
         &self,
         name: NestedDatasetLocation,
@@ -166,6 +188,38 @@ impl LocalStorage for StorageHandle {
         self.datasets_config_list().await.map_err(|err| err.into())
     }
 
+    fn dyn_dataset_get(
+        &self,
+        dataset_name: &String,
+    ) -> Result<DatasetProperties, Error> {
+        let Some(dataset) = illumos_utils::zfs::Zfs::get_dataset_properties(
+            &[dataset_name.clone()],
+            illumos_utils::zfs::WhichDatasets::SelfOnly,
+        )
+        .map_err(|err| Error::DatasetLookup(err))?
+        .pop() else {
+            // This should not be possible, unless the "zfs get" command is
+            // behaving unpredictably. We're only asking for a single dataset,
+            // so on success, we should see the result of that dataset.
+            return Err(Error::DatasetLookup(anyhow::anyhow!(
+                "Zfs::get_dataset_properties returned an empty vec?"
+            )));
+        };
+
+        Ok(dataset)
+    }
+
+    async fn dyn_ensure_mounted_and_get_mountpoint(
+        &self,
+        dataset: NestedDatasetLocation,
+        mount_root: &Utf8Path,
+    ) -> Result<Utf8PathBuf, Error> {
+        dataset
+            .ensure_mounted_and_get_mountpoint(mount_root)
+            .await
+            .map_err(Error::from)
+    }
+
     async fn dyn_nested_dataset_list(
         &self,
         name: NestedDatasetLocation,
@@ -200,6 +254,22 @@ impl LocalStorage for crate::sim::Storage {
         self.lock().datasets_config_list().map_err(|err| err.into())
     }
 
+    fn dyn_dataset_get(
+        &self,
+        dataset_name: &String,
+    ) -> Result<DatasetProperties, Error> {
+        self.lock().dataset_get(dataset_name).map_err(|err| err.into())
+    }
+
+    async fn dyn_ensure_mounted_and_get_mountpoint(
+        &self,
+        dataset: NestedDatasetLocation,
+        mount_root: &Utf8Path,
+    ) -> Result<Utf8PathBuf, Error> {
+        // Simulated storage treats all datasets as mounted.
+        Ok(dataset.mountpoint(mount_root))
+    }
+
     async fn dyn_nested_dataset_list(
         &self,
         name: NestedDatasetLocation,
@@ -378,7 +448,12 @@ impl<'a> SupportBundleManager<'a> {
     }
 
     // Returns a dataset that the sled has been explicitly configured to use.
-    async fn get_configured_dataset(
+    //
+    // In the context of Support Bundles, this is a "parent dataset", within
+    // which the "nested support bundle" dataset will be stored.
+    //
+    // Returns an error if this dataset is not mounted.
+    async fn get_mounted_dataset_config(
         &self,
         zpool_id: ZpoolUuid,
         dataset_id: DatasetUuid,
@@ -389,6 +464,14 @@ impl<'a> SupportBundleManager<'a> {
             .get(&dataset_id)
             .ok_or_else(|| Error::DatasetNotFound)?;
 
+        let dataset_props =
+            self.storage.dyn_dataset_get(&dataset.name.full_name())?;
+        if !dataset_props.mounted {
+            return Err(Error::DatasetNotMounted {
+                dataset: dataset.name.clone(),
+            });
+        }
+
         if dataset.id != dataset_id {
             return Err(Error::DatasetExistsBadConfig {
                 wanted: dataset_id,
@@ -412,7 +495,7 @@ impl<'a> SupportBundleManager<'a> {
         dataset_id: DatasetUuid,
     ) -> Result<Vec<SupportBundleMetadata>, Error> {
         let root =
-            self.get_configured_dataset(zpool_id, dataset_id).await?.name;
+            self.get_mounted_dataset_config(zpool_id, dataset_id).await?.name;
         let dataset_location =
             NestedDatasetLocation { path: String::from(""), root };
         let datasets = self
@@ -434,9 +517,13 @@ impl<'a> SupportBundleManager<'a> {
             };
 
             // The dataset for a support bundle exists.
-            let support_bundle_path = dataset
-                .name
-                .mountpoint(&self.storage.zpool_mountpoint_root())
+            let support_bundle_path = self
+                .storage
+                .dyn_ensure_mounted_and_get_mountpoint(
+                    dataset.name,
+                    &self.storage.zpool_mountpoint_root(),
+                )
+                .await?
                 .join(BUNDLE_FILE_NAME);
 
             // Identify whether or not the final "bundle" file exists.
@@ -522,28 +609,19 @@ impl<'a> SupportBundleManager<'a> {
             "bundle_id" => support_bundle_id.to_string(),
         ));
         info!(log, "creating support bundle");
+
+        // Access the parent dataset (presumably "crypt/debug")
+        // where the support bundle will be mounted.
         let root =
-            self.get_configured_dataset(zpool_id, dataset_id).await?.name;
+            self.get_mounted_dataset_config(zpool_id, dataset_id).await?.name;
         let dataset =
             NestedDatasetLocation { path: support_bundle_id.to_string(), root };
-        // The mounted root of the support bundle dataset
-        let support_bundle_dir =
-            dataset.mountpoint(&self.storage.zpool_mountpoint_root());
-        let support_bundle_path = support_bundle_dir.join(BUNDLE_FILE_NAME);
-        let support_bundle_path_tmp = support_bundle_dir.join(format!(
-            "bundle-{}.tmp",
-            thread_rng()
-                .sample_iter(Alphanumeric)
-                .take(6)
-                .map(char::from)
-                .collect::<String>()
-        ));
 
         // Ensure that the dataset exists.
         info!(log, "Ensuring dataset exists for bundle");
         self.storage
             .dyn_nested_dataset_ensure(NestedDatasetConfig {
-                name: dataset,
+                name: dataset.clone(),
                 inner: SharedDatasetConfig {
                     compression: CompressionAlgorithm::On,
                     quota: None,
                     reservation: None,
                 },
             })
             .await?;
         info!(log, "Dataset does exist for bundle");
 
+        // The mounted root of the support bundle dataset
+        let support_bundle_dir = self
+            .storage
+            .dyn_ensure_mounted_and_get_mountpoint(
+                dataset,
+                &self.storage.zpool_mountpoint_root(),
+            )
+            .await?;
+        let support_bundle_path = support_bundle_dir.join(BUNDLE_FILE_NAME);
+        let support_bundle_path_tmp = support_bundle_dir.join(format!(
+            "bundle-{}.tmp",
+            thread_rng()
+                .sample_iter(Alphanumeric)
+                .take(6)
+                .map(char::from)
+                .collect::<String>()
+        ));
+
         // Exit early if the support bundle already exists
         if tokio::fs::try_exists(&support_bundle_path).await? {
             if !Self::sha2_checksum_matches(
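
[review note] The temporary-file setup above now runs only after the nested dataset is guaranteed to be mounted. A sketch of the naming scheme it uses, assuming the rand 0.8 API (`thread_rng`/`Alphanumeric`) already imported by this file; the random suffix keeps a partially written bundle from colliding with the final `BUNDLE_FILE_NAME`:

```rust
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};

// Mirrors the in-progress bundle naming in create().
fn tmp_bundle_name() -> String {
    let suffix: String = thread_rng()
        .sample_iter(Alphanumeric)
        .take(6)
        .map(char::from)
        .collect();
    format!("bundle-{suffix}.tmp")
}

fn main() {
    let name = tmp_bundle_name();
    assert!(name.starts_with("bundle-") && name.ends_with(".tmp"));
    assert_eq!(name.len(), "bundle-".len() + 6 + ".tmp".len());
}
```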
@@ -624,7 +720,7 @@ impl<'a> SupportBundleManager<'a> {
         ));
         info!(log, "Destroying support bundle");
         let root =
-            self.get_configured_dataset(zpool_id, dataset_id).await?.name;
+            self.get_mounted_dataset_config(zpool_id, dataset_id).await?.name;
         self.storage
             .dyn_nested_dataset_destroy(NestedDatasetLocation {
                 path: support_bundle_id.to_string(),
@@ -641,13 +737,20 @@
         dataset_id: DatasetUuid,
         support_bundle_id: SupportBundleUuid,
     ) -> Result<tokio::fs::File, Error> {
+        // Access the parent dataset where the support bundle is stored.
         let root =
-            self.get_configured_dataset(zpool_id, dataset_id).await?.name;
+            self.get_mounted_dataset_config(zpool_id, dataset_id).await?.name;
         let dataset =
             NestedDatasetLocation { path: support_bundle_id.to_string(), root };
+
         // The mounted root of the support bundle dataset
-        let support_bundle_dir =
-            dataset.mountpoint(&self.storage.zpool_mountpoint_root());
+        let support_bundle_dir = self
+            .storage
+            .dyn_ensure_mounted_and_get_mountpoint(
+                dataset,
+                &self.storage.zpool_mountpoint_root(),
+            )
+            .await?;
         let path = support_bundle_dir.join(BUNDLE_FILE_NAME);
 
         let f = tokio::fs::File::open(&path).await?;
@@ -1428,6 +1531,235 @@ mod tests {
         logctx.cleanup_successful();
     }
 
+    async fn is_mounted(dataset: &str) -> bool {
+        let mut command = tokio::process::Command::new(illumos_utils::zfs::ZFS);
+        let cmd = command.args(&["list", "-Hpo", "mounted", dataset]);
+        let output = cmd.output().await.unwrap();
+        assert!(output.status.success(), "Failed to list dataset: {output:?}");
+        String::from_utf8_lossy(&output.stdout).trim() == "yes"
+    }
+
+    async fn unmount(dataset: &str) {
+        let mut command = tokio::process::Command::new(illumos_utils::PFEXEC);
+        let cmd =
+            command.args(&[illumos_utils::zfs::ZFS, "unmount", "-f", dataset]);
+        let output = cmd.output().await.unwrap();
+        assert!(
+            output.status.success(),
+            "Failed to unmount dataset: {output:?}"
+        );
+    }
+
+    #[tokio::test]
+    async fn cannot_create_bundle_on_unmounted_parent() {
+        let logctx = test_setup_log("cannot_create_bundle_on_unmounted_parent");
+        let log = &logctx.log;
+
+        // Set up storage
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // For this test, we'll add a dataset that can contain our bundles.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Before we actually create the bundle:
+        //
+        // Unmount the "parent dataset". This is equivalent to trying to create
+        // a support bundle when the debug dataset exists, but has not been
+        // mounted yet.
+        let parent_dataset = mgr
+            .get_mounted_dataset_config(harness.zpool_id, dataset_id)
+            .await
+            .expect("Could not get parent dataset from test harness")
+            .name;
+        let parent_dataset_name = parent_dataset.full_name();
+        assert!(is_mounted(&parent_dataset_name).await);
+        unmount(&parent_dataset_name).await;
+        assert!(!is_mounted(&parent_dataset_name).await);
+
+        // Create a new bundle
+        let err = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect_err("Should not have been able to create support bundle");
+        let Error::DatasetNotMounted { dataset } = err else {
+            panic!("Unexpected error: {err:?}");
+        };
+        assert_eq!(
+            dataset, parent_dataset,
+            "Unexpected 'parent dataset' in error message"
+        );
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn listing_bundles_mounts_them() {
+        let logctx = test_setup_log("listing_bundles_mounts_them");
+        let log = &logctx.log;
+
+        // Set up storage
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // For this test, we'll add a dataset that can contain our bundles.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Create a new bundle
+        let _ = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect("Should have created support bundle");
+
+        // Peek under the hood: We should be able to observe the support
+        // bundle as a nested dataset.
+        let root = mgr
+            .get_mounted_dataset_config(harness.zpool_id, dataset_id)
+            .await
+            .expect("Could not get parent dataset from test harness")
+            .name;
+        let nested_dataset =
+            NestedDatasetLocation { path: support_bundle_id.to_string(), root };
+        let nested_dataset_name = nested_dataset.full_name();
+
+        // The dataset was mounted after creation.
+        assert!(is_mounted(&nested_dataset_name).await);
+
+        // We can manually unmount this dataset.
+        unmount(&nested_dataset_name).await;
+        assert!(!is_mounted(&nested_dataset_name).await);
+
+        // When we "list" this nested dataset, it'll be mounted once more.
+        let _ = mgr
+            .list(harness.zpool_id, dataset_id)
+            .await
+            .expect("Should have been able to list bundle");
+        assert!(is_mounted(&nested_dataset_name).await);
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn getting_bundles_mounts_them() {
+        let logctx = test_setup_log("getting_bundles_mounts_them");
+        let log = &logctx.log;
+
+        // Set up storage
+        let harness = SingleU2StorageHarness::new(log).await;
+
+        // For this test, we'll add a dataset that can contain our bundles.
+        let dataset_id = DatasetUuid::new_v4();
+        harness.configure_dataset(dataset_id, DatasetKind::Debug).await;
+
+        // Access the Support Bundle API
+        let mgr = SupportBundleManager::new(
+            log,
+            harness.storage_test_harness.handle(),
+        );
+        let support_bundle_id = SupportBundleUuid::new_v4();
+        let zipfile_data = example_zipfile();
+        let hash = ArtifactHash(
+            Sha256::digest(zipfile_data.as_slice())
+                .as_slice()
+                .try_into()
+                .unwrap(),
+        );
+
+        // Create a new bundle
+        let _ = mgr
+            .create(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                hash,
+                stream::once(async {
+                    Ok(Bytes::copy_from_slice(zipfile_data.as_slice()))
+                }),
+            )
+            .await
+            .expect("Should have created support bundle");
+
+        // Peek under the hood: We should be able to observe the support
+        // bundle as a nested dataset.
+        let root = mgr
+            .get_mounted_dataset_config(harness.zpool_id, dataset_id)
+            .await
+            .expect("Could not get parent dataset from test harness")
+            .name;
+        let nested_dataset =
+            NestedDatasetLocation { path: support_bundle_id.to_string(), root };
+        let nested_dataset_name = nested_dataset.full_name();
+
+        // The dataset was mounted after creation.
+        assert!(is_mounted(&nested_dataset_name).await);
+
+        // We can manually unmount this dataset.
+        unmount(&nested_dataset_name).await;
+        assert!(!is_mounted(&nested_dataset_name).await);
+
+        // When we "get" this nested dataset, it'll be mounted once more.
+        let _ = mgr
+            .get(
+                harness.zpool_id,
+                dataset_id,
+                support_bundle_id,
+                None,
+                SupportBundleQueryType::Whole,
+            )
+            .await
+            .expect("Should have been able to GET bundle");
+        assert!(is_mounted(&nested_dataset_name).await);
+
+        harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
     #[tokio::test]
     async fn creation_idempotency() {
         let logctx = test_setup_log("creation_idempotency");
diff --git a/sled-storage/src/dataset.rs b/sled-storage/src/dataset.rs
index 0d28acad01e..4a5fd6feae5 100644
--- a/sled-storage/src/dataset.rs
+++ b/sled-storage/src/dataset.rs
@@ -266,7 +266,7 @@ pub(crate) async fn ensure_zpool_has_datasets(
         let name = format!("{}/{}", zpool_name, dataset);
         let result = Zfs::ensure_dataset(zfs::DatasetEnsureArgs {
             name: &name,
-            mountpoint: Mountpoint::Path(mountpoint),
+            mountpoint: Mountpoint(mountpoint),
             can_mount: zfs::CanMount::On,
             zoned,
             encryption_details: Some(encryption_details),
@@ -337,7 +337,7 @@ pub(crate) async fn ensure_zpool_has_datasets(
         });
         Zfs::ensure_dataset(zfs::DatasetEnsureArgs {
             name,
-            mountpoint: Mountpoint::Path(mountpoint),
+            mountpoint: Mountpoint(mountpoint),
             can_mount: zfs::CanMount::On,
             zoned,
             encryption_details,
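
[review note] The manager.rs hunks below document `NestedDatasetLocation::full_name`; a minimal stand-in sketch of that parent/child name composition, with hypothetical dataset names:

```rust
// Stand-in for NestedDatasetLocation::full_name: an empty nested path
// refers to the parent dataset itself.
fn full_name(root: &str, path: &str) -> String {
    if path.is_empty() {
        root.to_string()
    } else {
        format!("{root}/{path}")
    }
}

fn main() {
    assert_eq!(
        full_name("oxp_EXAMPLE/crypt/debug", "EXAMPLE-BUNDLE-ID"),
        "oxp_EXAMPLE/crypt/debug/EXAMPLE-BUNDLE-ID"
    );
    assert_eq!(full_name("oxp_EXAMPLE/crypt/debug", ""), "oxp_EXAMPLE/crypt/debug");
}
```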
diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs
index 37e4747622b..ba835176f3a 100644
--- a/sled-storage/src/manager.rs
+++ b/sled-storage/src/manager.rs
@@ -136,7 +136,10 @@ pub struct NestedDatasetLocation {
 }
 
 impl NestedDatasetLocation {
-    pub fn mountpoint(&self, root: &Utf8Path) -> Utf8PathBuf {
+    /// Returns the desired mountpoint of this dataset.
+    ///
+    /// Does not ensure that the dataset is mounted.
+    pub fn mountpoint(&self, mount_root: &Utf8Path) -> Utf8PathBuf {
         let mut path = Utf8Path::new(&self.path);
 
         // This path must be nested, so we need it to be relative to
@@ -152,9 +155,31 @@ impl NestedDatasetLocation {
                 .expect("Path is absolute, but we cannot strip '/' character");
         }
 
-        self.root.mountpoint(root).join(path)
+        // mount_root: Usually "/", but can be a tmp dir for tests
+        // self.root: Parent dataset mountpoint
+        // path: Path to nested dataset within parent dataset
+        self.root.mountpoint(mount_root).join(path)
     }
 
+    /// Access the mountpoint of this nested dataset, and ensure it's mounted.
+    ///
+    /// If it is not mounted, or cannot be mounted, return an error.
+    pub async fn ensure_mounted_and_get_mountpoint(
+        &self,
+        mount_root: &Utf8Path,
+    ) -> Result<Utf8PathBuf, EnsureDatasetError> {
+        let mountpoint = self.mountpoint(mount_root);
+        Zfs::ensure_dataset_mounted_and_exists(
+            &self.full_name(),
+            &Mountpoint(mountpoint.clone()),
+        )?;
+
+        return Ok(mountpoint);
+    }
+
+    /// Returns the full name of the nested dataset.
+    ///
+    /// This is a combination of the parent and child dataset names.
     pub fn full_name(&self) -> String {
         if self.path.is_empty() {
             self.root.full_name().to_string()
@@ -400,6 +425,15 @@ impl StorageHandle {
         rx.await.unwrap()
     }
 
+    /// Ensures that a dataset exists, nested somewhere arbitrary within
+    /// a Nexus-controlled dataset.
+    ///
+    /// This function does mount the dataset according to `config`.
+    /// However, this dataset is not automatically mounted on reboot.
+    ///
+    /// If you're trying to access a nested dataset later, consider
+    /// using the [NestedDatasetLocation::ensure_mounted_and_get_mountpoint]
+    /// function.
     pub async fn nested_dataset_ensure(
         &self,
         config: NestedDatasetConfig,
@@ -1078,7 +1112,7 @@ impl StorageManager {
         let mountpoint_path = config.name.mountpoint(mountpoint_root);
         let details = DatasetCreationDetails {
             zoned: config.name.kind().zoned(),
-            mountpoint: Mountpoint::Path(mountpoint_path),
+            mountpoint: Mountpoint(mountpoint_path),
             full_name: config.name.full_name(),
         };
 
@@ -1167,7 +1201,7 @@ impl StorageManager {
 
         let details = DatasetCreationDetails {
             zoned: false,
-            mountpoint: Mountpoint::Path(mountpoint_path),
+            mountpoint: Mountpoint(mountpoint_path),
             full_name: config.name.full_name(),
         };
 
@@ -1209,6 +1243,8 @@ impl StorageManager {
         let log = self.log.new(o!("request" => "nested_dataset_list"));
         info!(log, "Listing nested datasets");
 
+        // Observe all properties for this nested dataset, including
+        // children. We'll apply user-specified filters later.
         let full_name = name.full_name();
         let properties = Zfs::get_dataset_properties(
             &[full_name],
@@ -1524,7 +1560,7 @@ impl StorageManager {
         let size_details = None;
         Zfs::ensure_dataset(DatasetEnsureArgs {
             name: fs_name,
-            mountpoint: Mountpoint::Path(Utf8PathBuf::from("/data")),
+            mountpoint: Mountpoint(Utf8PathBuf::from("/data")),
             can_mount: CanMount::On,
             zoned,
             encryption_details,
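
[review note] To tie the pieces together: `ensure_mounted_and_get_mountpoint` computes the same path `mountpoint` returns, then asks `Zfs::ensure_dataset_mounted_and_exists` to make it real. A sketch of the path composition only, assuming the `camino` crate; the pool UUID and bundle ID below are hypothetical placeholders:

```rust
use camino::Utf8Path;

fn main() {
    // mount_root: usually "/", but a temporary directory under test.
    let mount_root = Utf8Path::new("/");
    // Parent (debug) dataset mountpoint, joined under mount_root.
    let parent_mountpoint = mount_root.join("pool/ext/EXAMPLE-UUID/crypt/debug");
    // The nested dataset's path is the support bundle ID.
    let mountpoint = parent_mountpoint.join("EXAMPLE-BUNDLE-ID");
    assert_eq!(
        mountpoint,
        Utf8Path::new("/pool/ext/EXAMPLE-UUID/crypt/debug/EXAMPLE-BUNDLE-ID")
    );
}
```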