@@ -186,7 +186,7 @@ trait GetMountpoint: AsRef<ZpoolName> {
}

#[derive(Debug)]
enum DumpSetupCmd {
enum DebugCollectorCmd {
ArchiveFormerZoneRoot {
zone_root: Utf8PathBuf,
zone_name: String,
@@ -200,7 +200,7 @@ enum DumpSetupCmd {
},
}

struct DumpSetupWorker {
struct DebugCollectorWorker {
core_dataset_names: Vec<CoreZpool>,
debug_dataset_names: Vec<DebugZpool>,

@@ -215,32 +215,32 @@ struct DumpSetupWorker {
savecored_slices: HashSet<DumpSlicePath>,

log: Logger,
rx: Receiver<DumpSetupCmd>,
rx: Receiver<DebugCollectorCmd>,
coredumpadm_invoker: Box<dyn CoreDumpAdmInvoker + Send + Sync>,
zfs_invoker: Box<dyn ZfsInvoker + Send + Sync>,
zone_invoker: Box<dyn ZoneInvoker + Send + Sync>,
}

pub struct DumpSetup {
tx: tokio::sync::mpsc::Sender<DumpSetupCmd>,
pub struct DebugCollector {
tx: tokio::sync::mpsc::Sender<DebugCollectorCmd>,
mount_config: Arc<MountConfig>,
_poller: tokio::task::JoinHandle<()>,
log: Logger,
}

impl DumpSetup {
impl DebugCollector {
pub fn new(log: &Logger, mount_config: Arc<MountConfig>) -> Self {
let (tx, rx) = tokio::sync::mpsc::channel(16);
let worker = DumpSetupWorker::new(
let worker = DebugCollectorWorker::new(
Box::new(RealCoreDumpAdm {}),
Box::new(RealZfs {}),
Box::new(RealZone {}),
log.new(o!("component" => "DumpSetup-worker")),
log.new(o!("component" => "DebugCollector-worker")),
rx,
);
let _poller =
tokio::spawn(async move { worker.poll_file_archival().await });
let log = log.new(o!("component" => "DumpSetup"));
let log = log.new(o!("component" => "DebugCollector"));
Self { tx, mount_config, _poller, log }
}
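
The worker above receives its coredumpadm, ZFS, and zone interactions as boxed trait objects, which is what lets the tests further down construct it with `FakeCoreDumpAdm`, `FakeZfs`, and `FakeZone` instead of the real invokers. A minimal sketch of that injection pattern, with a hypothetical `Invoker` trait and `Worker` type standing in for the real invoker traits and `DebugCollectorWorker`:

```rust
// Sketch only: `Invoker`, `RealInvoker`, `FakeInvoker`, and `Worker` are
// hypothetical stand-ins for CoreDumpAdmInvoker/ZfsInvoker/ZoneInvoker and
// the worker that owns them.
trait Invoker {
    fn invoke(&self) -> Result<(), String>;
}

struct RealInvoker;
impl Invoker for RealInvoker {
    fn invoke(&self) -> Result<(), String> {
        // A production implementation would drive the real tooling here.
        Ok(())
    }
}

#[derive(Default)]
struct FakeInvoker;
impl Invoker for FakeInvoker {
    fn invoke(&self) -> Result<(), String> {
        // A test double can record calls or return canned results instead.
        Ok(())
    }
}

struct Worker {
    invoker: Box<dyn Invoker + Send + Sync>,
}

impl Worker {
    fn new(invoker: Box<dyn Invoker + Send + Sync>) -> Self {
        Self { invoker }
    }

    fn run_once(&self) -> Result<(), String> {
        self.invoker.invoke()
    }
}

fn main() {
    // Production wiring passes the real invoker; tests pass a fake.
    let prod = Worker::new(Box::new(RealInvoker));
    let test = Worker::new(Box::<FakeInvoker>::default());
    let _ = (prod.run_once(), test.run_once());
}
```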

@@ -249,7 +249,7 @@ impl DumpSetup {
///
/// This function returns only once this request has been handled, which
/// can be used as a signal by callers that any "old disks" are no longer
/// being used by [DumpSetup].
/// being used by [DebugCollector].
pub async fn update_dumpdev_setup(
&self,
disks: impl Iterator<Item = &Disk>,
@@ -322,19 +322,19 @@ impl DumpSetup {
let (tx, rx) = oneshot::channel();
if let Err(err) = self
.tx
.send(DumpSetupCmd::UpdateDumpdevSetup {
.send(DebugCollectorCmd::UpdateDumpdevSetup {
dump_slices: m2_dump_slices,
debug_datasets: u2_debug_datasets,
core_datasets: m2_core_datasets,
update_complete_tx: tx,
})
.await
{
error!(log, "DumpSetup channel closed: {:?}", err.0);
error!(log, "DebugCollector channel closed: {:?}", err.0);
};

if let Err(err) = rx.await {
error!(log, "DumpSetup failed to await update"; "err" => ?err);
error!(log, "DebugCollector failed to await update"; "err" => ?err);
}
}
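
The doc comment above promises that `update_dumpdev_setup` returns only once the worker has handled the request, and this hunk shows the mechanism: the command carries a `oneshot::Sender` that the worker completes when it finishes. A self-contained sketch of that request/acknowledgement round trip, assuming the tokio crate, with hypothetical `Cmd`, `worker`, and `update` names rather than the real types:

```rust
use tokio::sync::{mpsc, oneshot};

// Hypothetical command type mirroring the `update_complete_tx` field above.
enum Cmd {
    Update { update_complete_tx: oneshot::Sender<()> },
}

// Worker side: process the request, then acknowledge it.
async fn worker(mut rx: mpsc::Receiver<Cmd>) {
    while let Some(Cmd::Update { update_complete_tx }) = rx.recv().await {
        // ... apply the new dump/debug configuration here ...
        // Ignore the error if the caller stopped waiting.
        let _ = update_complete_tx.send(());
    }
}

// Caller side: returns only after the worker has handled the request.
async fn update(tx: &mpsc::Sender<Cmd>) {
    let (ack_tx, ack_rx) = oneshot::channel();
    if tx.send(Cmd::Update { update_complete_tx: ack_tx }).await.is_err() {
        eprintln!("worker channel closed");
        return;
    }
    if ack_rx.await.is_err() {
        eprintln!("worker dropped the completion sender");
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(16);
    tokio::spawn(worker(rx));
    update(&tx).await;
}
```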

@@ -383,7 +383,7 @@ impl DumpSetup {
info!(log, "requesting archive of former zone root");
let zone_root = zone_root.to_owned();
let zone_name = file_name.to_string();
let cmd = DumpSetupCmd::ArchiveFormerZoneRoot {
let cmd = DebugCollectorCmd::ArchiveFormerZoneRoot {
zone_root,
zone_name,
completion_tx,
@@ -392,7 +392,7 @@ impl DumpSetup {
error!(
log,
"failed to request archive of former zone root";
"error" => "DumpSetup channel closed"
"error" => "DebugCollector channel closed"
);
}
}
@@ -603,13 +603,13 @@ fn safe_to_delete(path: &Utf8Path, meta: &std::fs::Metadata) -> bool {
return true;
}

impl DumpSetupWorker {
impl DebugCollectorWorker {
fn new(
coredumpadm_invoker: Box<dyn CoreDumpAdmInvoker + Send + Sync>,
zfs_invoker: Box<dyn ZfsInvoker + Send + Sync>,
zone_invoker: Box<dyn ZoneInvoker + Send + Sync>,
log: Logger,
rx: Receiver<DumpSetupCmd>,
rx: Receiver<DebugCollectorCmd>,
) -> Self {
Self {
core_dataset_names: vec![],
@@ -630,7 +630,7 @@ impl DumpSetupWorker {
}

async fn poll_file_archival(mut self) {
info!(self.log, "DumpSetup poll loop started.");
info!(self.log, "DebugCollector poll loop started.");

// A oneshot which helps callers track when updates have propagated.
//
@@ -642,7 +642,7 @@ impl DumpSetupWorker {
loop {
match tokio::time::timeout(ARCHIVAL_INTERVAL, self.rx.recv()).await
{
Ok(Some(DumpSetupCmd::UpdateDumpdevSetup {
Ok(Some(DebugCollectorCmd::UpdateDumpdevSetup {
dump_slices,
debug_datasets,
core_datasets,
@@ -656,7 +656,7 @@ impl DumpSetupWorker {
core_datasets,
);
}
Ok(Some(DumpSetupCmd::ArchiveFormerZoneRoot {
Ok(Some(DebugCollectorCmd::ArchiveFormerZoneRoot {
zone_root,
zone_name,
completion_tx,
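
The two hunks above sit inside the worker's poll loop, which does double duty: it handles commands as they arrive and falls back to a periodic archival pass when nothing arrives within `ARCHIVAL_INTERVAL`. A minimal sketch of that `tokio::time::timeout`-around-`recv()` shape, with a hypothetical command enum and interval standing in for `DebugCollectorCmd` and `ARCHIVAL_INTERVAL`:

```rust
use std::time::Duration;
use tokio::sync::mpsc;

// Hypothetical stand-ins for DebugCollectorCmd and ARCHIVAL_INTERVAL.
enum Cmd {
    UpdateSetup,
    ArchiveZoneRoot,
}
const INTERVAL: Duration = Duration::from_secs(300);

async fn poll_loop(mut rx: mpsc::Receiver<Cmd>) {
    loop {
        match tokio::time::timeout(INTERVAL, rx.recv()).await {
            // A command arrived before the interval elapsed.
            Ok(Some(Cmd::UpdateSetup)) => {
                // ... reevaluate dump slices and datasets ...
            }
            Ok(Some(Cmd::ArchiveZoneRoot)) => {
                // ... archive the requested zone root ...
            }
            // All senders were dropped: shut the worker down.
            Ok(None) => break,
            // No command within the interval: run the periodic archival pass.
            Err(_elapsed) => {
                // ... rotate logs and archive cores to the debug dataset ...
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<Cmd>(16);
    tokio::spawn(poll_loop(rx));
    let _ = tx.send(Cmd::UpdateSetup).await;
}
```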
@@ -1586,7 +1586,7 @@ mod tests {
);
const NOT_MOUNTED_INTERNAL: &str =
"oxi_acab2069-6e63-6c75-de73-20c06c756db0";
let mut worker = DumpSetupWorker::new(
let mut worker = DebugCollectorWorker::new(
Box::<FakeCoreDumpAdm>::default(),
Box::new(FakeZfs {
zpool_props: [(
@@ -1641,7 +1641,7 @@ mod tests {
name: ZpoolName::from_str(ERROR_INTERNAL).unwrap(),
};
const ZPOOL_MNT: &str = "/path/to/internal/zpool";
let mut worker = DumpSetupWorker::new(
let mut worker = DebugCollectorWorker::new(
Box::<FakeCoreDumpAdm>::default(),
Box::new(FakeZfs {
zpool_props: [
@@ -1732,7 +1732,7 @@ mod tests {
let logctx = omicron_test_utils::dev::test_setup_log(
"test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir",
);
let mut worker = DumpSetupWorker::new(
let mut worker = DebugCollectorWorker::new(
Box::<FakeCoreDumpAdm>::default(),
Box::<FakeZfs>::default(),
Box::<FakeZone>::default(),
@@ -1760,7 +1760,7 @@ mod tests {
let logctx = omicron_test_utils::dev::test_setup_log(
"test_dumpadm_called_when_vacant_slice_but_no_dir",
);
let mut worker = DumpSetupWorker::new(
let mut worker = DebugCollectorWorker::new(
Box::<FakeCoreDumpAdm>::default(),
Box::<FakeZfs>::default(),
Box::<FakeZone>::default(),
@@ -1791,7 +1791,7 @@ mod tests {
const MOUNTED_EXTERNAL: &str =
"oxp_446f6e74-4469-6557-6f6e-646572696e67";
const ZPOOL_MNT: &str = "/path/to/external/zpool";
let mut worker = DumpSetupWorker::new(
let mut worker = DebugCollectorWorker::new(
Box::<FakeCoreDumpAdm>::default(),
Box::new(FakeZfs {
zpool_props: [(
@@ -1853,7 +1853,7 @@ mod tests {
"oxi_474e554e-6174-616c-6965-4e677579656e";
const MOUNTED_EXTERNAL: &str =
"oxp_446f6e74-4469-6557-6f6e-646572696e67";
let mut worker = DumpSetupWorker::new(
let mut worker = DebugCollectorWorker::new(
Box::<FakeCoreDumpAdm>::default(),
Box::new(FakeZfs {
zpool_props: [
@@ -1981,15 +1981,15 @@ mod tests {
}
}

async fn new_dump_setup_worker(
async fn new_debug_collector_worker(
&self,
used: u64,
available: u64,
) -> DumpSetupWorker {
) -> DebugCollectorWorker {
let tempdir_path = self.tempdir.path().to_string();
const MOUNTED_EXTERNAL: &str =
"oxp_446f6e74-4469-6557-6f6e-646572696e67";
let mut worker = DumpSetupWorker::new(
let mut worker = DebugCollectorWorker::new(
Box::<FakeCoreDumpAdm>::default(),
Box::new(FakeZfs {
zpool_props: [
@@ -2190,7 +2190,7 @@ mod tests {
USED, CAPACITY,
);

let worker = files.new_dump_setup_worker(USED, AVAILABLE).await;
let worker = files.new_debug_collector_worker(USED, AVAILABLE).await;

// Before we cleanup: All files in "debug" exist
files.check_all_files_exist();
@@ -2232,7 +2232,7 @@ mod tests {
USED, CAPACITY,
);

let worker = files.new_dump_setup_worker(USED, AVAILABLE).await;
let worker = files.new_debug_collector_worker(USED, AVAILABLE).await;

// Before we cleanup: All files in "debug" exist
files.check_all_files_exist();
@@ -2290,7 +2290,7 @@ mod tests {
USED, CAPACITY,
);

let worker = files.new_dump_setup_worker(USED, AVAILABLE).await;
let worker = files.new_debug_collector_worker(USED, AVAILABLE).await;

// Before we cleanup: All files in "debug" exist
files.check_all_files_exist();
@@ -2351,7 +2351,7 @@ mod tests {
USED, CAPACITY,
);

let worker = files.new_dump_setup_worker(USED, AVAILABLE).await;
let worker = files.new_debug_collector_worker(USED, AVAILABLE).await;

// Before we cleanup: All files in "debug" exist
files.check_all_files_exist();
@@ -6,7 +6,7 @@
//! response to changes in available disks.

use crate::InternalDisksReceiver;
use crate::dump_setup::DumpSetup;
use crate::debug_collector::DebugCollector;
use camino::Utf8PathBuf;
use debug_ignore::DebugIgnore;
use sled_storage::config::MountConfig;
@@ -33,16 +33,16 @@ pub(crate) fn spawn(
// to enqueue the request or for the request to complete.
let (archive_tx, archive_rx) = mpsc::channel(1);

let dump_setup_task = DumpSetupTask {
let debug_collector_task = DebugCollectorTask {
internal_disks_rx,
external_disks_rx,
archive_rx,
dump_setup: DumpSetup::new(base_log, mount_config),
debug_collector: DebugCollector::new(base_log, mount_config),
last_disks_used: HashSet::new(),
log: base_log.new(slog::o!("component" => "DumpSetupTask")),
log: base_log.new(slog::o!("component" => "DebugCollectorTask")),
};

tokio::spawn(dump_setup_task.run());
tokio::spawn(debug_collector_task.run());

FormerZoneRootArchiver {
log: DebugIgnore(
@@ -52,23 +52,24 @@
}
}

struct DumpSetupTask {
struct DebugCollectorTask {
// Input channels on which we receive updates about disk changes.
internal_disks_rx: InternalDisksReceiver,
external_disks_rx: watch::Receiver<HashSet<Disk>>,
// Input channel on which we receive requests to archive zone roots.
archive_rx: mpsc::Receiver<FormerZoneRootArchiveRequest>,

// Invokes dumpadm(8) and savecore(8) when new disks are encountered
dump_setup: DumpSetup,
debug_collector: DebugCollector,

// Set of internal + external disks we most recently passed to `dump_setup`.
// Set of internal + external disks we most recently passed to the
// debug collector.
last_disks_used: HashSet<Disk>,

log: Logger,
}

impl DumpSetupTask {
impl DebugCollectorTask {
async fn run(mut self) {
self.update_setup_if_needed().await;

@@ -119,7 +120,7 @@ impl DumpSetupTask {
completion_tx
} = request;
self
.dump_setup
.debug_collector
.archive_former_zone_root(&path, completion_tx)
.await;
}
@@ -138,7 +139,7 @@ impl DumpSetupTask {
.collect::<HashSet<_>>();

if disks_avail != self.last_disks_used {
self.dump_setup.update_dumpdev_setup(disks_avail.iter()).await;
self.debug_collector.update_dumpdev_setup(disks_avail.iter()).await;
self.last_disks_used = disks_avail;
}
}
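
As this hunk shows, the task only forwards a disk set to the collector when it differs from the one it last applied, so repeated wake-ups from the watch channels with unchanged contents are cheap no-ops. A small sketch of that compare-then-store pattern, with a hypothetical `apply` standing in for `update_dumpdev_setup` and `String` standing in for `Disk`:

```rust
use std::collections::HashSet;

// Hypothetical stand-in for the real `Disk` type.
type Disk = String;

struct Task {
    last_disks_used: HashSet<Disk>,
}

impl Task {
    fn update_if_needed(&mut self, disks_avail: HashSet<Disk>) {
        // Only push an update when the set actually changed.
        if disks_avail != self.last_disks_used {
            self.apply(&disks_avail);
            self.last_disks_used = disks_avail;
        }
    }

    fn apply(&self, disks: &HashSet<Disk>) {
        // Stand-in for `debug_collector.update_dumpdev_setup(...)`.
        println!("applying configuration for {} disks", disks.len());
    }
}

fn main() {
    let mut task = Task { last_disks_used: HashSet::new() };
    let disks: HashSet<Disk> = ["m2-a".to_string(), "u2-b".to_string()].into();
    task.update_if_needed(disks.clone());
    // Same set again: no update is pushed.
    task.update_if_needed(disks);
}
```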
6 changes: 3 additions & 3 deletions sled-agent/config-reconciler/src/handle.rs
@@ -50,8 +50,8 @@ use crate::SledAgentFacilities;
use crate::TimeSyncStatus;
use crate::dataset_serialization_task::DatasetTaskHandle;
use crate::dataset_serialization_task::NestedDatasetMountError;
use crate::dump_setup_task;
use crate::dump_setup_task::FormerZoneRootArchiver;
use crate::debug_collector_task;
use crate::debug_collector_task::FormerZoneRootArchiver;
use crate::internal_disks::InternalDisksReceiver;
use crate::ledger::CurrentSledConfig;
use crate::ledger::LedgerTaskHandle;
@@ -136,7 +136,7 @@ impl ConfigReconcilerHandle {
// Spawn the task that manages dump devices.
let (external_disks_tx, external_disks_rx) =
watch::channel(HashSet::new());
let former_zone_root_archiver = dump_setup_task::spawn(
let former_zone_root_archiver = debug_collector_task::spawn(
internal_disks_rx.clone(),
external_disks_rx,
Arc::clone(&mount_config),
4 changes: 2 additions & 2 deletions sled-agent/config-reconciler/src/lib.rs
@@ -46,9 +46,9 @@
//! [`ConfigReconcilerHandle::inventory()`].

mod dataset_serialization_task;
mod debug_collector;
mod debug_collector_task;
mod disks_common;
mod dump_setup;
mod dump_setup_task;
mod handle;
mod host_phase_2;
mod internal_disks;
2 changes: 1 addition & 1 deletion sled-agent/config-reconciler/src/reconciler_task.rs
@@ -42,7 +42,7 @@ use crate::InternalDisksReceiver;
use crate::SledAgentArtifactStore;
use crate::TimeSyncConfig;
use crate::dataset_serialization_task::DatasetTaskHandle;
use crate::dump_setup_task::FormerZoneRootArchiver;
use crate::debug_collector_task::FormerZoneRootArchiver;
use crate::host_phase_2::BootPartitionReconciler;
use crate::ledger::CurrentSledConfig;
use crate::raw_disks::RawDisksReceiver;