Merged
141 changes: 98 additions & 43 deletions illumos-utils/src/zfs.rs
@@ -139,6 +139,9 @@ enum EnsureDatasetErrorRaw {
#[error("Unexpected output from ZFS commands: {0}")]
Output(String),

#[error("Dataset does not exist")]
DoesNotExist,

#[error("Failed to mount filesystem")]
MountFsFailed(#[source] crate::ExecutionError),

@@ -220,18 +223,11 @@ pub struct Zfs {}

/// Describes a mountpoint for a ZFS filesystem.
#[derive(Debug, Clone)]
pub enum Mountpoint {
#[allow(dead_code)]
Legacy,
Collaborator Author: It simplifies logic elsewhere if this is "always a path". I went ahead and updated this, since there appear to be no users of "legacy".

Contributor: The dataset mounted at "/" will actually show up as a legacy mountpoint, however I did a quick spot check and it doesn't look like we use this anywhere when reading from zfs to get a mountpoint.

Path(Utf8PathBuf),
}
pub struct Mountpoint(pub Utf8PathBuf);

impl fmt::Display for Mountpoint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Mountpoint::Legacy => write!(f, "legacy"),
Mountpoint::Path(p) => write!(f, "{p}"),
}
write!(f, "{}", self.0)
}
}
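
Since `Mountpoint` is now a newtype over `Utf8PathBuf`, every mountpoint is a concrete path and `Display` simply prints it. A minimal sketch of a call site (the path is illustrative; the import paths follow the `zfs::Mountpoint` usage in the sled-agent diffs below):

```rust
use camino::Utf8PathBuf;
use illumos_utils::zfs::Mountpoint;

fn example() {
    // No Legacy/Path distinction anymore: construct directly from a path.
    let mountpoint = Mountpoint(Utf8PathBuf::from("/data/debug"));
    assert_eq!(mountpoint.to_string(), "/data/debug");
}
```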

@@ -507,12 +503,22 @@ fn build_zfs_set_key_value_pairs(
}

/// Describes the ZFS "canmount" options.
#[derive(Copy, Clone, Debug)]
pub enum CanMount {
On,
Off,
NoAuto,
}

impl CanMount {
fn wants_mounting(&self) -> bool {
Collaborator Author: This is just to make the code more readable below. Should be the same functionality as before.

match self {
CanMount::On => true,
CanMount::Off | CanMount::NoAuto => false,
}
}
}
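
`wants_mounting` is private to the zfs module; it exists purely so the mounting condition below reads cleanly. A sketch of the mapping it encodes, written as it would appear inside the module since the helper is not public:

```rust
// Sketch, inside illumos-utils/src/zfs.rs: only canmount=on datasets are
// candidates for mounting; off and noauto never are.
#[test]
fn can_mount_wants_mounting() {
    assert!(CanMount::On.wants_mounting());
    assert!(!CanMount::Off.wants_mounting());
    assert!(!CanMount::NoAuto.wants_mounting());
}
```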

/// Arguments to [Zfs::ensure_dataset].
pub struct DatasetEnsureArgs<'a> {
/// The full path of the ZFS dataset.
@@ -796,6 +802,21 @@ fn is_directory_immutable(
return Ok(result);
}

struct DatasetMountInfo {
Collaborator Author: We used to return a tuple of bools, I'm just making this explicit for readability.

exists: bool,
mounted: bool,
}

impl DatasetMountInfo {
fn exists(mounted: bool) -> Self {
Self { exists: true, mounted }
}

fn does_not_exist() -> Self {
Self { exists: false, mounted: false }
}
}
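
Compared with the old `(bool, bool)` return value, the named struct makes the two flags unambiguous at the call site. A small sketch of the states the two constructors can produce (again module-internal, since the type is private):

```rust
// Sketch, inside illumos-utils/src/zfs.rs: the only states dataset_exists reports.
#[test]
fn dataset_mount_info_states() {
    let missing = DatasetMountInfo::does_not_exist();
    assert!(!missing.exists && !missing.mounted);

    let unmounted = DatasetMountInfo::exists(false);
    assert!(unmounted.exists && !unmounted.mounted);

    let mounted = DatasetMountInfo::exists(true);
    assert!(mounted.exists && mounted.mounted);
}
```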

impl Zfs {
/// Lists all datasets within a pool or existing dataset.
///
@@ -900,6 +921,50 @@ impl Zfs {
Ok(())
}

/// Ensures that an existing ZFS dataset is mounted.
///
/// Returns an error if the dataset does not exist, or if it exists but
/// cannot be mounted.
pub fn ensure_dataset_mounted_and_exists(
name: &str,
mountpoint: &Mountpoint,
) -> Result<(), EnsureDatasetError> {
Self::ensure_dataset_mounted_and_exists_inner(name, mountpoint)
.map_err(|err| EnsureDatasetError {
name: name.to_string(),
err,
})?;
Ok(())
}

fn ensure_dataset_mounted_and_exists_inner(
name: &str,
mountpoint: &Mountpoint,
) -> Result<(), EnsureDatasetErrorRaw> {
let mount_info = Self::dataset_exists(name, mountpoint)?;
if !mount_info.exists {
return Err(EnsureDatasetErrorRaw::DoesNotExist);
}

if !mount_info.mounted {
Self::ensure_dataset_mounted(name, mountpoint)?;
}
return Ok(());
}

fn ensure_dataset_mounted(
name: &str,
mountpoint: &Mountpoint,
) -> Result<(), EnsureDatasetErrorRaw> {
ensure_empty_immutable_mountpoint(&mountpoint.0).map_err(|err| {
EnsureDatasetErrorRaw::MountpointCreation {
mountpoint: mountpoint.0.to_path_buf(),
err,
}
})?;
Self::mount_dataset(name)?;
Ok(())
}
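
A hedged sketch of how a caller might use the new public entry point, assuming `EnsureDatasetError` is exported alongside `Zfs` and `Mountpoint`; the dataset name and mountpoint are illustrative, not taken from this PR:

```rust
use camino::Utf8PathBuf;
use illumos_utils::zfs::{EnsureDatasetError, Mountpoint, Zfs};

// Hypothetical caller: the dataset must already exist; this only ensures
// it is mounted at the given path, preparing an empty immutable mountpoint
// directory first if needed.
fn remount_debug_dataset() -> Result<(), EnsureDatasetError> {
    Zfs::ensure_dataset_mounted_and_exists(
        "oxp_example/crypt/debug",
        &Mountpoint(Utf8PathBuf::from("/pool/ext/example/crypt/debug")),
    )
}
```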

/// Creates a new ZFS dataset unless one already exists.
///
/// Refer to [DatasetEnsureArgs] for details on the supplied arguments.
@@ -923,53 +988,43 @@ impl Zfs {
additional_options,
}: DatasetEnsureArgs,
) -> Result<(), EnsureDatasetErrorRaw> {
let (exists, mounted) = Self::dataset_exists(name, &mountpoint)?;
let dataset_info = Self::dataset_exists(name, &mountpoint)?;

// Non-zoned datasets with an explicit mountpoint and the
// "canmount=on" property should be mounted within the global zone.
//
// Zoned datasets are mounted when their zones are booted, so
// we don't do this mountpoint manipulation for them.
let wants_mounting =
!zoned && !dataset_info.mounted && can_mount.wants_mounting();
let props = build_zfs_set_key_value_pairs(size_details, id);
if exists {

if dataset_info.exists {
// If the dataset already exists: Update properties which might
// have changed, and ensure it has been mounted if it needs
// to be mounted.
Self::set_values(name, props.as_slice())
.map_err(|err| EnsureDatasetErrorRaw::from(err.err))?;

if !zoned && !mounted {
if let (CanMount::On, Mountpoint::Path(path)) =
(&can_mount, &mountpoint)
{
ensure_empty_immutable_mountpoint(&path).map_err(
|err| EnsureDatasetErrorRaw::MountpointCreation {
mountpoint: path.to_path_buf(),
err,
},
)?;
Self::mount_dataset(name)?;
}
if wants_mounting {
Self::ensure_dataset_mounted(name, &mountpoint)?;
}

return Ok(());
}

// If the dataset doesn't exist, create it.

// Non-zoned datasets with an explicit mountpoint and the
// "canmount=on" property should be mounted within the global zone.
//
// We'll ensure they have an empty immutable mountpoint before
// creating the dataset itself, which will also mount it.
//
// Zoned datasets are mounted when their zones are booted, so
// we don't do this mountpoint manipulation for them.
if !zoned {
if let (CanMount::On, Mountpoint::Path(path)) =
(&can_mount, &mountpoint)
{
ensure_empty_immutable_mountpoint(&path).map_err(|err| {
EnsureDatasetErrorRaw::MountpointCreation {
mountpoint: path.to_path_buf(),
err,
}
})?;
}
if wants_mounting {
let path = &mountpoint.0;
ensure_empty_immutable_mountpoint(&path).map_err(|err| {
Contributor: Should this be ensure_dataset_mounted()?

Collaborator Author: No, this is on the creation path. The dataset does not exist yet.

Contributor: Ah okay, the "wants_mounting" is a little confusing but I understand now. Maybe add a comment that mentions creating the immutable mountpoint for future mounting.

Collaborator Author (@smklein, Apr 10, 2025): So, I have a comment on lines 1012 - 1019 which tries to cover this:

        // Non-zoned datasets with an explicit mountpoint and the
        // "canmount=on" property should be mounted within the global zone.
        //
        // We'll ensure they have an empty immutable mountpoint before
        // creating the dataset itself, which will also mount it.
        //
        // Zoned datasets are mounted when their zones are booted, so
        // we don't do this mountpoint manipulation for them.

Do you have recommendations for how to make this more clear? My read of this is that it already does mention "creating the immutable mountpoint for future mounting", in particular:

        // We'll ensure they have an empty immutable mountpoint before
        // creating the dataset itself, which will also mount it.

Collaborator Author: I guess I can re-shuffle the comment to go alongside the wants_mounting variable.

EnsureDatasetErrorRaw::MountpointCreation {
mountpoint: path.to_path_buf(),
err,
}
})?;
}

let mut command = std::process::Command::new(PFEXEC);
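
To summarize the two paths discussed in the thread above, here is a hedged control-flow sketch of `ensure_dataset`; names are simplified and this is not the actual implementation:

```rust
// Sketch of the decision discussed above:
// - existing dataset: update properties, then mount it if wants_mounting.
// - missing dataset: prepare the empty immutable mountpoint first, because
//   `zfs create` will mount the dataset as a side effect of creation.
fn ensure_dataset_flow(exists: bool, wants_mounting: bool) -> &'static str {
    match (exists, wants_mounting) {
        (true, true) => "set properties, then ensure_dataset_mounted",
        (true, false) => "set properties only",
        (false, true) => "prepare immutable mountpoint, then zfs create",
        (false, false) => "zfs create without mountpoint preparation",
    }
}
```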
@@ -1048,7 +1103,7 @@ impl Zfs {
fn dataset_exists(
name: &str,
mountpoint: &Mountpoint,
) -> Result<(bool, bool), EnsureDatasetErrorRaw> {
) -> Result<DatasetMountInfo, EnsureDatasetErrorRaw> {
let mut command = std::process::Command::new(ZFS);
let cmd = command.args(&[
"list",
Expand All @@ -1064,9 +1119,9 @@ impl Zfs {
return Err(EnsureDatasetErrorRaw::Output(stdout.to_string()));
}
let mounted = values[3] == "yes";
Ok((true, mounted))
Ok(DatasetMountInfo::exists(mounted))
} else {
Ok((false, false))
Ok(DatasetMountInfo::does_not_exist())
}
}
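
`dataset_exists` shells out to `zfs list` (the full argument list is truncated in this view) and inspects the fourth whitespace-separated column to decide whether the dataset is mounted. A hedged sketch of that parsing step, assuming one output row per dataset with the `mounted` property as the fourth field:

```rust
// Sketch: given one whitespace-separated row of `zfs list` output,
// report whether the listed dataset is currently mounted.
// Returns None if the row does not have the expected shape.
fn parse_mounted_column(row: &str) -> Option<bool> {
    let values: Vec<&str> = row.split_whitespace().collect();
    // dataset_exists reads values[3]; "yes" means mounted.
    values.get(3).map(|v| *v == "yes")
}
```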

2 changes: 1 addition & 1 deletion sled-agent/src/backing_fs.rs
@@ -134,7 +134,7 @@ pub(crate) fn ensure_backing_fs(
sled_storage::dataset::M2_BACKING_DATASET,
bfs.name
);
let mountpoint = Mountpoint::Path(Utf8PathBuf::from(bfs.mountpoint));
let mountpoint = Mountpoint(Utf8PathBuf::from(bfs.mountpoint));

info!(log, "Ensuring dataset {}", dataset);

2 changes: 1 addition & 1 deletion sled-agent/src/bootstrap/pre_server.rs
@@ -275,7 +275,7 @@ fn ensure_zfs_key_directory_exists(log: &Logger) -> Result<(), StartError> {
fn ensure_zfs_ramdisk_dataset() -> Result<(), StartError> {
Zfs::ensure_dataset(zfs::DatasetEnsureArgs {
name: zfs::ZONE_ZFS_RAMDISK_DATASET,
mountpoint: zfs::Mountpoint::Path(Utf8PathBuf::from(
mountpoint: zfs::Mountpoint(Utf8PathBuf::from(
zfs::ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT,
)),
can_mount: zfs::CanMount::On,
50 changes: 50 additions & 0 deletions sled-agent/src/sim/storage.rs
@@ -22,6 +22,8 @@ use crucible_agent_client::types::{
};
use dropshot::HandlerTaskMode;
use dropshot::HttpError;
use illumos_utils::zfs::DatasetProperties;
use omicron_common::api::external::ByteCount;
use omicron_common::disk::DatasetManagementStatus;
use omicron_common::disk::DatasetName;
use omicron_common::disk::DatasetsConfig;
@@ -1362,6 +1364,54 @@ impl StorageInner {
Ok(config.clone())
}

pub fn dataset_get(
&self,
dataset_name: &String,
) -> Result<DatasetProperties, HttpError> {
let Some(config) = self.dataset_config.as_ref() else {
return Err(HttpError::for_not_found(
None,
"No control plane datasets".into(),
));
};

for (id, dataset) in config.datasets.iter() {
if dataset.name.full_name().as_str() == dataset_name {
return Ok(DatasetProperties {
id: Some(*id),
name: dataset_name.to_string(),
mounted: true,
avail: ByteCount::from_kibibytes_u32(1024),
used: ByteCount::from_kibibytes_u32(1024),
quota: dataset.inner.quota,
reservation: dataset.inner.reservation,
compression: dataset.inner.compression.to_string(),
});
}
}

for (nested_dataset_name, nested_dataset_storage) in
self.nested_datasets.iter()
{
if nested_dataset_name.full_name().as_str() == dataset_name {
let config = &nested_dataset_storage.config.inner;

return Ok(DatasetProperties {
id: None,
name: dataset_name.to_string(),
mounted: true,
avail: ByteCount::from_kibibytes_u32(1024),
used: ByteCount::from_kibibytes_u32(1024),
quota: config.quota,
reservation: config.reservation,
compression: config.compression.to_string(),
});
}
}

return Err(HttpError::for_not_found(None, "Dataset not found".into()));
}

pub fn datasets_ensure(
&mut self,
config: DatasetsConfig,