From 213caa32713e7dcd76936bbdc8e2ff47fbfab0a0 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 22 Jan 2020 11:43:49 -0500 Subject: [PATCH 01/76] Changed socket type to Dealer, and added outline for creating message map --- Cargo.lock | 2 +- src/disk_manager.rs | 2 +- src/lib/lib.rs | 4 ++-- src/main.rs | 51 ++++++++++++++------------------------------- 4 files changed, 20 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81d787c..741c096 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,7 +198,7 @@ dependencies = [ [[package]] name = "block-utils" version = "0.6.2" -source = "git+https://github.com/mzhong1/block-utils.git#518f7ddae349a925ebf59b4c5601fa4c89a1f1ca" +source = "git+https://github.com/mzhong1/block-utils.git#63cbf0ff3ab7a8e66725d2445a762bd7d0b67af0" dependencies = [ "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 9d7fcb0..afb415f 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -187,7 +187,7 @@ fn listen( ) -> BynarResult<()> { debug!("Starting zmq listener with version({:?})", zmq::version()); let context = zmq::Context::new(); - let responder = context.socket(zmq::REP)?; + let responder = context.socket(zmq::DEALER)?; debug!("Listening on tcp://{}:5555", listen_address); // Fail to start if this fails diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 92ad285..7504b8a 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -34,7 +34,7 @@ where pub fn connect(host: &str, port: &str, server_publickey: &str) -> BynarResult { debug!("Starting zmq sender with version({:?})", zmq::version()); let context = zmq::Context::new(); - let requester = context.socket(zmq::REQ)?; + let requester = context.socket(zmq::DEALER)?; let client_keypair = zmq::CurveKeyPair::new()?; debug!("Created new keypair"); requester.set_curve_serverkey(server_publickey)?; @@ -44,7 +44,7 @@ pub fn connect(host: &str, port: &str, server_publickey: &str) -> BynarResult, - /// Redfish credentials - redfish_username: Option, - /// Redfish credentials - redfish_password: Option, - /// The port redfish is listening on - redfish_port: Option, - slack_webhook: Option, - slack_channel: Option, - slack_botname: Option, - vault_endpoint: Option, - vault_token: Option, - pub jira_user: String, - pub jira_password: String, - pub jira_host: String, - pub jira_issue_type: String, - pub jira_priority: String, - pub jira_project_id: String, - pub jira_ticket_assignee: String, - pub proxy: Option, - pub database: DBConfig, +// a specific operation and its outcome +struct DiskOp { + op_type: Op, // operation type + ret_val: Option, //None if outcome not yet determined } -#[derive(Clone, Debug, Deserialize)] -pub struct DBConfig { - pub username: String, - pub password: Option, - pub port: u16, - pub endpoint: String, - pub dbname: String, -}*/ +// create a message map to handle list of disk-manager requests +fn create_msg_map() -> BynarResult>>> { + // List out currently mounted block_devices + let mut devices = block_utils::get_block_devices()?; + let mut map: HashMap>> = HashMap::new(); + + // for each block device get its partitions + // add them to HashMap + Ok(map) +} fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { let c = config.clone(); From d4dfb9286680098f94b8b59cbefee127fbf24345 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 22 Jan 2020 16:20:19 -0500 Subject: [PATCH 02/76] implement create function for 
message request map --- Cargo.lock | 2 +- src/lib/lib.rs | 2 +- src/main.rs | 44 +++++++++++++++++++++++++++++++++++++------- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 741c096..81d787c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,7 +198,7 @@ dependencies = [ [[package]] name = "block-utils" version = "0.6.2" -source = "git+https://github.com/mzhong1/block-utils.git#63cbf0ff3ab7a8e66725d2445a762bd7d0b67af0" +source = "git+https://github.com/mzhong1/block-utils.git#518f7ddae349a925ebf59b4c5601fa4c89a1f1ca" dependencies = [ "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 7504b8a..3c62b01 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -44,7 +44,7 @@ pub fn connect(host: &str, port: &str, server_publickey: &str) -> BynarResult, //None if outcome not yet determined } // create a message map to handle list of disk-manager requests fn create_msg_map() -> BynarResult>>> { // List out currently mounted block_devices - let mut devices = block_utils::get_block_devices()?; + let mut devices: Vec = block_utils::get_block_devices()? + .into_iter() + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("sr") + } else { + true + }) + }) + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") + } else { + true + }) + }) + .collect(); let mut map: HashMap>> = HashMap::new(); - - // for each block device get its partitions - // add them to HashMap + let mut partitions = block_utils::get_block_partitions()?; + // for each block device add its partitions to the HashMap + // add them to HashMap + for device in &devices { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(device.to_path_buf(), None); + // check if partition parent is device + for partition in &partitions { + if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { + if &disk == device { + disk_map.insert(partition.to_path_buf(), None); + } + } + } + map.insert(device.to_path_buf(), disk_map); + } Ok(map) } From 56371f8df7014dca7980e8167fa44d046bc50350 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 23 Jan 2020 15:25:15 -0500 Subject: [PATCH 03/76] Add operation to message map --- src/main.rs | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/src/main.rs b/src/main.rs index c3281aa..67ddd66 100644 --- a/src/main.rs +++ b/src/main.rs @@ -93,6 +93,67 @@ fn create_msg_map() -> BynarResult>>, + dev_path: &PathBuf, + op: DiskOp, +) -> BynarResult<()> { + if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { + //parent is in the map + if let Some(disk) = message_map.get_mut(&parent) { + if let Some(partition) = disk.get(dev_path) { + // partition in map + if partition.is_some() { + return Ok(()); + } + disk.insert(dev_path.to_path_buf(), Some(op)); + } + } else { + //add to map + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(parent.to_path_buf(), None); + let partitions = block_utils::get_block_partitions()?; + // check if partition parent is device + for partition in &partitions { + if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? 
{ + if disk == parent { + disk_map.insert(partition.to_path_buf(), None); + } + } + } + message_map.insert(parent.to_path_buf(), disk_map); + } + } else { + //not partition + //parent is in the map + if let Some(disk) = message_map.get_mut(dev_path) { + if let Some(partition) = disk.get(dev_path) { + // partition in map + if partition.is_some() { + return Ok(()); + } + disk.insert(dev_path.to_path_buf(), Some(op)); + } + } else { + //add to map + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(dev_path.to_path_buf(), None); + let partitions = block_utils::get_block_partitions()?; + // check if partition parent is device + for partition in &partitions { + if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { + if &disk == dev_path { + disk_map.insert(partition.to_path_buf(), None); + } + } + } + message_map.insert(dev_path.to_path_buf(), disk_map); + } + } + Ok(()) +} + fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { let c = config.clone(); let slack = Slack::new( From 7bde826f0a4514861bbdfb80ce253968e71e744a Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 23 Jan 2020 15:55:13 -0500 Subject: [PATCH 04/76] get an operation from the map --- src/main.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/main.rs b/src/main.rs index 67ddd66..8729dce 100644 --- a/src/main.rs +++ b/src/main.rs @@ -47,6 +47,7 @@ use std::process::Command; use std::time::{Duration, Instant}; // a specific operation and its outcome +#[derive(Debug, Clone)] struct DiskOp { op_type: Op, // operation type ret_val: Option, //None if outcome not yet determined @@ -154,6 +155,32 @@ fn add_map_op( Ok(()) } +// get the operation for a device (disk/partition) if one exists +fn get_map_op( + message_map: &HashMap>>, + dev_path: &PathBuf, +) -> BynarResult> { + if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { + //parent is in the map + if let Some(disk) = message_map.get(&parent) { + if let Some(partition) = disk.get(dev_path) { + // partition in map + return Ok(partition.clone()); + } + } + } else { + //not partition + //parent is in the map + if let Some(disk) = message_map.get(dev_path) { + if let Some(partition) = disk.get(dev_path) { + // partition in map + return Ok(partition.clone()); + } + } + } + return Ok(None); +} + fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { let c = config.clone(); let slack = Slack::new( From c0f9dc52b89bfb9cfc2f1f8a496618ab311c3a50 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 24 Jan 2020 10:03:48 -0500 Subject: [PATCH 05/76] Remove an op from the message map --- src/main.rs | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 8729dce..8b180b7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -74,7 +74,7 @@ fn create_msg_map() -> BynarResult>> = HashMap::new(); - let mut partitions = block_utils::get_block_partitions()?; + let partitions = block_utils::get_block_partitions()?; // for each block device add its partitions to the HashMap // add them to HashMap for device in &devices { @@ -181,6 +181,39 @@ fn get_map_op( return Ok(None); } +// replace the DiskOp associated with the input dev_path None and return the previous DiskOp +// If the dev_path is not in the map error out +fn remove_map_op( + message_map: &mut HashMap>>, + dev_path: &PathBuf, +) -> BynarResult> { + if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? 
{ + //parent is in the map + if let Some(disk) = message_map.get_mut(&parent) { + if let Some(partition) = disk.clone().get(dev_path) { + //set point as None + disk.insert(dev_path.to_path_buf(), None); + // partition in map + return Ok(partition.clone()); + } + } + } else { + //not partition + //parent is in the map + if let Some(disk) = message_map.get_mut(dev_path) { + if let Some(partition) = disk.clone().get(dev_path) { + // partition in map + disk.insert(dev_path.to_path_buf(), None); + return Ok(partition.clone()); + } + } + } + return Err(BynarError::from(format!( + "Path {} is not in the message map", + dev_path.display() + ))); +} + fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { let c = config.clone(); let slack = Slack::new( From 9957a9918596896d66a550ab741ff063a878a1eb Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 24 Jan 2020 10:23:50 -0500 Subject: [PATCH 06/76] Update the add_map_op function to update the map if it already has an op and return the old one --- src/main.rs | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/src/main.rs b/src/main.rs index 8b180b7..b282d56 100644 --- a/src/main.rs +++ b/src/main.rs @@ -94,26 +94,25 @@ fn create_msg_map() -> BynarResult>>, dev_path: &PathBuf, op: DiskOp, -) -> BynarResult<()> { +) -> BynarResult> { if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { //parent is in the map if let Some(disk) = message_map.get_mut(&parent) { - if let Some(partition) = disk.get(dev_path) { + if let Some(partition) = disk.clone().get(dev_path) { // partition in map - if partition.is_some() { - return Ok(()); - } disk.insert(dev_path.to_path_buf(), Some(op)); + return Ok(partition.clone()); } + disk.insert(dev_path.to_path_buf(), Some(op)); } else { //add to map let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(parent.to_path_buf(), None); + disk_map.insert(parent.to_path_buf(), Some(op)); let partitions = block_utils::get_block_partitions()?; // check if partition parent is device for partition in &partitions { @@ -129,17 +128,16 @@ fn add_map_op( //not partition //parent is in the map if let Some(disk) = message_map.get_mut(dev_path) { - if let Some(partition) = disk.get(dev_path) { + if let Some(partition) = disk.clone().get(dev_path) { // partition in map - if partition.is_some() { - return Ok(()); - } disk.insert(dev_path.to_path_buf(), Some(op)); + return Ok(partition.clone()); } + disk.insert(dev_path.to_path_buf(), Some(op)); } else { //add to map let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(dev_path.to_path_buf(), None); + disk_map.insert(dev_path.to_path_buf(), Some(op)); let partitions = block_utils::get_block_partitions()?; // check if partition parent is device for partition in &partitions { @@ -152,7 +150,7 @@ fn add_map_op( message_map.insert(dev_path.to_path_buf(), disk_map); } } - Ok(()) + Ok(None) } // get the operation for a device (disk/partition) if one exists From 82bd8f319c08aa917f0ec63c7c5508c13b6b4c90 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 24 Jan 2020 10:49:41 -0500 Subject: [PATCH 07/76] Get a specific disk hashmap --- src/main.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/main.rs b/src/main.rs index b282d56..516c495 100644 --- a/src/main.rs +++ b/src/main.rs @@ -212,6 +212,26 @@ fn remove_map_op( ))); } +// get the hashmap associated with a diskpath from the op map +fn get_disk_map_op( + message_map: &HashMap>>, + dev_path: &PathBuf, +) -> 
BynarResult>> { + if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { + //parent is in the map + if let Some(disk) = message_map.get(&parent) { + return Ok(disk.clone()); + } + } else { + //not partition + //parent is in the map + if let Some(disk) = message_map.get(dev_path) { + return Ok(disk.clone()); + } + } + Err(BynarError::from(format!("Path is not a disk in the map"))) +} + fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { let c = config.clone(); let slack = Slack::new( From 5336b74c1f22880437169d2cc057ecca9cbbed29 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 27 Jan 2020 10:35:03 -0500 Subject: [PATCH 08/76] Added channels and edited disk-manager's add_disk and listen functions with threadPool for threaded worker add_disk operations --- Cargo.lock | 24 ++++ Cargo.toml | 1 + api/protos/service.proto | 8 ++ src/disk_manager.rs | 298 ++++++++++++++++++++++----------------- src/lib/error.rs | 2 + 5 files changed, 200 insertions(+), 133 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81d787c..3bc0d26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,6 +231,7 @@ dependencies = [ "ceph 3.2.0 (git+https://github.com/mzhong1/ceph-rust)", "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "daemonize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "derive-error 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -451,6 +452,27 @@ dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "crossbeam" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-channel 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-channel" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "crossbeam-deque" version = "0.2.0" @@ -3123,6 +3145,8 @@ dependencies = [ "checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" "checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" "checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +"checksum crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" +"checksum crossbeam-channel 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "acec9a3b0b3559f15aee4f90746c4e5e293b701c0f7d3925d24e01645267b68c" "checksum crossbeam-deque 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3" "checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" "checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150" diff --git a/Cargo.toml b/Cargo.toml index 0bd35d6..6c67aed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,7 @@ bytes = "*" ceph = {git = "https://github.com/mzhong1/ceph-rust"}#"~3.0" chrono = "~0.4" clap = "~2" +crossbeam = "~0.7" daemonize = "~0.4" derive-error = "0.0.4" dirs = "~2.0" diff --git a/api/protos/service.proto b/api/protos/service.proto index b12bfd0..b88ea1e 100644 --- a/api/protos/service.proto +++ b/api/protos/service.proto @@ -80,6 +80,8 @@ message OpBoolResult { optional bool value = 2; // error_msg is set if ERR optional string error_msg = 3; + // the type of the operation + required Op op_type = 4; } message OpStringResult { @@ -88,6 +90,8 @@ message OpStringResult { optional string value = 2; // error_msg is set if ERR optional string error_msg = 3; + // the type of the operation + required Op op_type = 4; } message OpOutcomeResult{ @@ -98,6 +102,10 @@ message OpOutcomeResult{ optional bool value = 3; // error_msg set if ERR optional string error_msg = 4; + // the type of the operation + required Op op_type = 5; + // the disk/partition the operation was performed on + optional string disk = 6; } enum OpOutcome { diff --git a/src/disk_manager.rs b/src/disk_manager.rs index afb415f..5b1305f 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -7,6 +7,7 @@ use std::path::Path; use std::process; use std::process::Command; use std::str::FromStr; +use std::sync::{Arc, Mutex}; use std::thread; use std::time::{Duration, Instant}; @@ -21,6 +22,7 @@ mod test_disk; use crate::backend::BackendType; use block_utils::{Device, MediaType}; use clap::{crate_authors, crate_version, App, Arg}; +use crossbeam::*; use daemonize::Daemonize; use gpt::{disk, header::read_header, partition::read_partitions}; use hashicorp_vault::client::VaultClient; @@ -196,138 +198,173 @@ fn listen( .bind(&format!("tcp://{}:5555", listen_address)) .is_ok()); - 'outer: loop { - let now = Instant::now(); - let events = responder.get_events()? as zmq::PollEvents; - // is the socket readable? - if (events & zmq::POLLIN) != 0 { - let msg = responder.recv_bytes(0)?; - debug!("Got msg len: {}", msg.len()); - trace!("Parsing msg {:?} as hex", msg); - let operation = match parse_from_bytes::(&msg) { - Ok(bytes) => bytes, - Err(e) => { - error!("Failed to parse_from_bytes {:?}. 
Ignoring request", e); - continue; - } - }; - - debug!("Operation requested: {:?}", operation.get_Op_type()); - if op_no_disk(&responder, &operation) { - continue; - } - match operation.get_Op_type() { - Op::Add => { - let id = if operation.has_osd_id() { - Some(operation.get_osd_id()) - } else { - None - }; - match add_disk( - &responder, - operation.get_disk(), - &backend_type, - id, - config_dir, - ) { - Ok(_) => { - info!("Add disk finished"); - } - Err(e) => { - error!("Add disk error: {:?}", e); - } - }; - } - Op::AddPartition => { - // - } - Op::List => { - match list_disks(&responder) { - Ok(_) => { - info!("List disks finished"); - } + debug!("Building thread pool"); + let pool = rayon::ThreadPoolBuilder::new().num_threads(16).build()?; + let responder = Arc::new(Mutex::new(responder)); + // channel to send results from backend to main thread + let (send_res, recv_res) = crossbeam_channel::unbounded::(); + let (send_disk, recv_disk) = crossbeam_channel::unbounded::(); + pool.scope(|s| 'outer: loop { + match responder.try_lock() { + Ok(responder) => { + let now = Instant::now(); + let events = match responder.get_events() { + Err(zmq::Error::EBUSY) => { + trace!("Socket Busy, skip"); + continue; + } + Err(e) => return Err(BynarError::from(e)), + Ok(e) => e as zmq::PollEvents, + }; + let send_res = send_res.clone(); + // is the socket readable? + if (events & zmq::POLLIN) != 0 { + let msg = responder.recv_bytes(0)?; + debug!("Got msg len: {}", msg.len()); + trace!("Parsing msg {:?} as hex", msg); + let operation = match parse_from_bytes::(&msg) { + Ok(bytes) => bytes, Err(e) => { - error!("List disks error: {:?}", e); + error!("Failed to parse_from_bytes {:?}. Ignoring request", e); + continue; } }; - } - Op::Remove => { - let mut result = OpOutcomeResult::new(); - match safe_to_remove( - &Path::new(operation.get_disk()), - &backend_type, - config_dir, - ) { - Ok((OpOutcome::Success, true)) => { - match remove_disk( + + debug!("Operation requested: {:?}", operation.get_Op_type()); + if op_no_disk(&responder, &operation) { + continue; + } + match operation.get_Op_type() { + Op::Add => { + let id = if operation.has_osd_id() { + Some(operation.get_osd_id()) + } else { + None + }; + s.spawn(move |_| { + let disk = operation.get_disk(); + match add_disk( + &send_res.clone(), + disk, + &backend_type, + id.clone(), + config_dir, + ) { + Ok(_) => { + info!("Add disk finished"); + } + Err(e) => { + error!("Add disk error: {:?}", e); + } + } + }); + /*match add_disk( &responder, operation.get_disk(), &backend_type, + id, config_dir, ) { Ok(_) => { - info!("Remove disk finished"); + info!("Add disk finished"); } Err(e) => { - error!("Remove disk error: {:?}", e); + error!("Add disk error: {:?}", e); } - }; - } - Ok((OpOutcome::Skipped, val)) => { - debug!("Disk skipped"); - result.set_outcome(OpOutcome::Skipped); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = respond_to_client(&result, &responder); - } - Ok((OpOutcome::SkipRepeat, val)) => { - debug!("Disk skipped, safe to remove already ran"); - result.set_outcome(OpOutcome::SkipRepeat); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = respond_to_client(&result, &responder); + };*/ } - Ok((_, false)) => { - debug!("Disk is not safe to remove"); - //Response to client - result.set_value(false); - result.set_outcome(OpOutcome::Success); - result.set_result(ResultType::ERR); - result.set_error_msg("Not safe to remove disk".to_string()); - let _ = respond_to_client(&result, &responder); + 
Op::AddPartition => { + // } - Err(e) => { - error!("safe to remove failed: {:?}", e); - // Response to client - result.set_value(false); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - let _ = respond_to_client(&result, &responder); - } - }; - } - Op::SafeToRemove => { - match safe_to_remove_disk( - &responder, - operation.get_disk(), - &backend_type, - config_dir, - ) { - Ok(_) => { - info!("Safe to remove disk finished"); + Op::List => { + match list_disks(&send_disk) { + Ok(_) => { + info!("List disks finished"); + } + Err(e) => { + error!("List disks error: {:?}", e); + } + }; } - Err(e) => { - error!("Safe to remove error: {:?}", e); + Op::Remove => { + let mut result = OpOutcomeResult::new(); + match safe_to_remove( + &Path::new(operation.get_disk()), + &backend_type, + config_dir, + ) { + Ok((OpOutcome::Success, true)) => { + match remove_disk( + &responder, + operation.get_disk(), + &backend_type, + config_dir, + ) { + Ok(_) => { + info!("Remove disk finished"); + } + Err(e) => { + error!("Remove disk error: {:?}", e); + } + }; + } + Ok((OpOutcome::Skipped, val)) => { + debug!("Disk skipped"); + result.set_outcome(OpOutcome::Skipped); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = respond_to_client(&result, &responder); + } + Ok((OpOutcome::SkipRepeat, val)) => { + debug!("Disk skipped, safe to remove already ran"); + result.set_outcome(OpOutcome::SkipRepeat); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = respond_to_client(&result, &responder); + } + Ok((_, false)) => { + debug!("Disk is not safe to remove"); + //Response to client + result.set_value(false); + result.set_outcome(OpOutcome::Success); + result.set_result(ResultType::ERR); + result.set_error_msg("Not safe to remove disk".to_string()); + let _ = respond_to_client(&result, &responder); + } + Err(e) => { + error!("safe to remove failed: {:?}", e); + // Response to client + result.set_value(false); + result.set_result(ResultType::ERR); + result.set_error_msg(e.to_string()); + let _ = respond_to_client(&result, &responder); + } + }; } - }; - } - Op::GetCreatedTickets => { - match get_jira_tickets(&responder, config_dir) { - Ok(_) => { - info!("Fetching jira tickets finished"); + Op::SafeToRemove => { + match safe_to_remove_disk( + &responder, + operation.get_disk(), + &backend_type, + config_dir, + ) { + Ok(_) => { + info!("Safe to remove disk finished"); + } + Err(e) => { + error!("Safe to remove error: {:?}", e); + } + }; } - Err(e) => { - error!("Fetching jira error: {:?}", e); + Op::GetCreatedTickets => { + match get_jira_tickets(&responder, config_dir) { + Ok(_) => { + info!("Fetching jira tickets finished"); + } + Err(e) => { + error!("Fetching jira error: {:?}", e); + } + }; } }; } @@ -353,25 +390,16 @@ fn listen( config_file.expect("Failed to load config"); let _ = notify_slack(&config, &format!("Reload disk-manager config file")).expect("Unable to connect to slack"); } - signal_hook::SIGINT | signal_hook::SIGCHLD => { - //skip this - debug!("Ignore signal"); - continue; - } - signal_hook::SIGTERM => { - //"gracefully" exit - debug!("Exit Process"); - break 'outer; - } - _ => unreachable!(), } } } + Err(_) => {} } - } + }); Ok(()) } +// send message to client fn respond_to_client(result: &T, s: &Socket) -> BynarResult<()> { let encoded = result.write_to_bytes()?; debug!("Responding to client with msg len: {}", encoded.len()); @@ -379,14 +407,17 @@ fn respond_to_client(result: &T, s: &Socket) -> BynarResul Ok(()) } +// add disk 
request function. Send the result through the sender channel back to the main thread. fn add_disk( - s: &Socket, + sender: &crossbeam_channel::Sender, d: &str, backend: &BackendType, id: Option, config_dir: &Path, ) -> BynarResult<()> { let mut result = OpOutcomeResult::new(); + result.set_disk(d.to_string()); + result.set_op_type(Op::Add); let backend = match backend::load_backend(backend, Some(config_dir)) { Ok(backend) => backend, Err(e) => { @@ -394,7 +425,7 @@ fn add_disk( result.set_error_msg(e.to_string()); // Bail early. We can't load the backend - let _ = respond_to_client(&result, s); + let _ = sender.send(result); return Ok(()); } }; @@ -410,7 +441,7 @@ fn add_disk( result.set_error_msg(e.to_string()); } }; - let _ = respond_to_client(&result, s); + let _ = sender.send(result); Ok(()) } @@ -469,16 +500,17 @@ fn get_partition_info(dev_path: &Path) -> BynarResult { Ok(partition_info) } -fn list_disks(s: &Socket) -> BynarResult<()> { +fn list_disks(c: &crossbeam_channel::Sender) -> BynarResult<()> { let disk_list: Vec = get_disks()?; let mut disks = Disks::new(); disks.set_disk(RepeatedField::from_vec(disk_list)); - debug!("Encoding disk list"); + /*debug!("Encoding disk list"); let encoded = disks.write_to_bytes()?; debug!("Responding to client with msg len: {}", encoded.len()); - s.send(&encoded, 0)?; + s.send(&encoded, 0)?;*/ + let _ = c.send(disks); Ok(()) } diff --git a/src/lib/error.rs b/src/lib/error.rs index 33a0355..7604ce3 100644 --- a/src/lib/error.rs +++ b/src/lib/error.rs @@ -10,6 +10,7 @@ use postgres::Error as PostgresError; use protobuf::ProtobufError; use pwd::PwdError; use r2d2::Error as R2d2Error; +use rayon::ThreadPoolBuildError; use reqwest::Error as ReqwestError; use serde_json::Error as SerdeJsonError; use slack_hook::Error as SlackError; @@ -79,6 +80,7 @@ pub enum BynarError { ProtobufError(ProtobufError), #[error(msg, non_std, no_from)] PwdError(PwdBError), + RayonError(ThreadPoolBuildError), R2d2Error(R2d2Error), #[error(msg, non_std)] RadosError(RadosError), From 50a46a3fbb08f074249c5ab6d03bae43a55050ad Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 27 Jan 2020 14:27:54 -0500 Subject: [PATCH 09/76] Changed disk-manager handler for all functions to use threadpool/DEALER sockets --- src/disk_manager.rs | 289 ++++++++++++++++++++++++++------------------ 1 file changed, 169 insertions(+), 120 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 5b1305f..0379c59 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -204,6 +204,7 @@ fn listen( // channel to send results from backend to main thread let (send_res, recv_res) = crossbeam_channel::unbounded::(); let (send_disk, recv_disk) = crossbeam_channel::unbounded::(); + let (send_ticket, recv_ticket) = crossbeam_channel::unbounded::(); pool.scope(|s| 'outer: loop { match responder.try_lock() { Ok(responder) => { @@ -217,6 +218,8 @@ fn listen( Ok(e) => e as zmq::PollEvents, }; let send_res = send_res.clone(); + let send_disk = send_disk.clone(); + let send_ticket = send_ticket.clone(); // is the socket readable? 
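                // (zmq sets POLLIN when at least one complete message is queued, so the recv_bytes call below should not block the loop)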
if (events & zmq::POLLIN) != 0 { let msg = responder.recv_bytes(0)?; @@ -244,7 +247,7 @@ fn listen( s.spawn(move |_| { let disk = operation.get_disk(); match add_disk( - &send_res.clone(), + &send_res, disk, &backend_type, id.clone(), @@ -258,106 +261,101 @@ fn listen( } } }); - /*match add_disk( - &responder, - operation.get_disk(), - &backend_type, - id, - config_dir, - ) { - Ok(_) => { - info!("Add disk finished"); - } - Err(e) => { - error!("Add disk error: {:?}", e); - } - };*/ } Op::AddPartition => { // } Op::List => { - match list_disks(&send_disk) { - Ok(_) => { - info!("List disks finished"); - } - Err(e) => { - error!("List disks error: {:?}", e); - } - }; + s.spawn(move |_| { + match list_disks(&send_disk) { + Ok(_) => { + info!("List disks finished"); + } + Err(e) => { + error!("List disks error: {:?}", e); + } + }; + }); } Op::Remove => { let mut result = OpOutcomeResult::new(); - match safe_to_remove( - &Path::new(operation.get_disk()), - &backend_type, - config_dir, - ) { - Ok((OpOutcome::Success, true)) => { - match remove_disk( - &responder, - operation.get_disk(), - &backend_type, - config_dir, - ) { - Ok(_) => { - info!("Remove disk finished"); - } - Err(e) => { - error!("Remove disk error: {:?}", e); - } - }; - } - Ok((OpOutcome::Skipped, val)) => { - debug!("Disk skipped"); - result.set_outcome(OpOutcome::Skipped); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = respond_to_client(&result, &responder); - } - Ok((OpOutcome::SkipRepeat, val)) => { - debug!("Disk skipped, safe to remove already ran"); - result.set_outcome(OpOutcome::SkipRepeat); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = respond_to_client(&result, &responder); - } - Ok((_, false)) => { - debug!("Disk is not safe to remove"); - //Response to client - result.set_value(false); - result.set_outcome(OpOutcome::Success); - result.set_result(ResultType::ERR); - result.set_error_msg("Not safe to remove disk".to_string()); - let _ = respond_to_client(&result, &responder); - } - Err(e) => { - error!("safe to remove failed: {:?}", e); - // Response to client - result.set_value(false); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - let _ = respond_to_client(&result, &responder); - } - }; + result.set_disk(operation.get_disk().to_string()); + result.set_op_type(Op::Remove); + + s.spawn(move |_| { + match safe_to_remove( + &Path::new(operation.get_disk()), + &backend_type, + config_dir, + ) { + Ok((OpOutcome::Success, true)) => { + match remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + config_dir, + ) { + Ok(_) => { + info!("Remove disk finished"); + } + Err(e) => { + error!("Remove disk error: {:?}", e); + } + }; + } + Ok((OpOutcome::Skipped, val)) => { + debug!("Disk skipped"); + result.set_outcome(OpOutcome::Skipped); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = send_res.send(result); + } + Ok((OpOutcome::SkipRepeat, val)) => { + debug!("Disk skipped, safe to remove already ran"); + result.set_outcome(OpOutcome::SkipRepeat); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = send_res.send(result); + } + Ok((_, false)) => { + debug!("Disk is not safe to remove"); + //Response to client + result.set_value(false); + result.set_outcome(OpOutcome::Success); + result.set_result(ResultType::ERR); + result.set_error_msg("Not safe to remove disk".to_string()); + let _ = send_res.send(result); + } + Err(e) => { + error!("safe to remove failed: {:?}", e); + // Response to 
client + result.set_value(false); + result.set_result(ResultType::ERR); + result.set_error_msg(e.to_string()); + let _ = send_res.send(result); + } + }; + }); } Op::SafeToRemove => { - match safe_to_remove_disk( - &responder, - operation.get_disk(), - &backend_type, - config_dir, - ) { - Ok(_) => { - info!("Safe to remove disk finished"); - } - Err(e) => { - error!("Safe to remove error: {:?}", e); - } - }; + s.spawn(move |_| { + match safe_to_remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + config_dir, + ) { + Ok(_) => { + info!("Safe to remove disk finished"); + } + Err(e) => { + error!("Safe to remove error: {:?}", e); + } + }; + }); } Op::GetCreatedTickets => { - match get_jira_tickets(&responder, config_dir) { + match get_jira_tickets(&send_ticket, config_dir) { Ok(_) => { info!("Fetching jira tickets finished"); } @@ -368,23 +366,66 @@ fn listen( } }; } - }; - } - if daemon { - while (now.elapsed() < Duration::from_millis(10)) { - for signal in signals.pending() { - match signal as c_int { - signal_hook::SIGHUP => { - //Reload the config file - debug!("Reload Config File"); - let config_file = helpers::load_config(config_dir, "disk-manager.json"); - if let Err(e) = config_file { - error!( - "Failed to load config file {}. error: {}", - config_dir.join("disk-manager.json").display(), - e - ); - return Ok(()); + // send completed requests (or error messages) + if (events & zmq::POLLOUT) != 0 { + //check disks first, since those are faster requests than add/remove reqs + match recv_disk.try_recv() { + Ok(result) => { + // send result back to client + let _ = respond_to_client(&result, &responder); + } + Err(_) => { + // check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) + match recv_ticket.try_recv() { + Ok(result) => { + // send result back to client + let _ = respond_to_client(&result, &responder); + } + Err(_) => { + // no disks in the queue, check if any add/remove/safe-to-remove req results + match recv_res.try_recv() { + Ok(result) => { + // send result back to client + let _ = respond_to_client(&result, &responder); + } + Err(_) => {} //do nothing + } + } + } + } + } + } + if daemon { + while now.elapsed() < Duration::from_millis(10) { + for signal in signals.pending() { + match signal as c_int { + signal_hook::SIGHUP => { + //Reload the config file + debug!("Reload Config File"); + let config_file = + helpers::load_config(config_dir, "bynar.json"); + if let Err(e) = config_file { + error!( + "Failed to load config file {}. 
error: {}", + config_dir.join("bynar.json").display(), + e + ); + return Ok(()); + } + let config: ConfigSettings = + config_file.expect("Failed to load config"); + } + signal_hook::SIGINT | signal_hook::SIGCHLD => { + //skip this + debug!("Ignore signal"); + continue; + } + signal_hook::SIGTERM => { + //"gracefully" exit + debug!("Exit Process"); + break 'outer Ok(()); + } + _ => unreachable!(), } let config: DiskManagerConfig = config_file.expect("Failed to load config"); @@ -514,9 +555,16 @@ fn list_disks(c: &crossbeam_channel::Sender) -> BynarResult<()> { Ok(()) } -fn remove_disk(s: &Socket, d: &str, backend: &BackendType, config_dir: &Path) -> BynarResult<()> { +fn remove_disk( + sender: &crossbeam_channel::Sender, + d: &str, + backend: &BackendType, + config_dir: &Path, +) -> BynarResult<()> { //Returns OpOutcomeResult let mut result = OpOutcomeResult::new(); + result.set_disk(d.to_string()); + result.set_op_type(Op::Remove); let backend = match backend::load_backend(backend, Some(config_dir)) { Ok(b) => b, Err(e) => { @@ -524,7 +572,7 @@ fn remove_disk(s: &Socket, d: &str, backend: &BackendType, config_dir: &Path) -> result.set_error_msg(e.to_string()); // Bail early. We can't load the backend - let _ = respond_to_client(&result, s); + let _ = sender.send(result); return Ok(()); } }; @@ -538,7 +586,7 @@ fn remove_disk(s: &Socket, d: &str, backend: &BackendType, config_dir: &Path) -> result.set_error_msg(e.to_string()); } }; - let _ = respond_to_client(&result, s); + let _ = sender.send(result); Ok(()) } @@ -554,13 +602,15 @@ fn safe_to_remove( } fn safe_to_remove_disk( - s: &Socket, + sender: &crossbeam_channel::Sender, d: &str, backend: &BackendType, config_dir: &Path, ) -> BynarResult<()> { debug!("Checking if {} is safe to remove", d); let mut result = OpOutcomeResult::new(); + result.set_disk(d.to_string()); + result.set_op_type(Op::SafeToRemove); match safe_to_remove(&Path::new(d), &backend, &config_dir) { Ok((outcome, val)) => { debug!("Safe to remove: {}", val); @@ -572,19 +622,18 @@ fn safe_to_remove_disk( debug!("Safe to remove err: {}", e); result.set_result(ResultType::ERR); result.set_error_msg(e.to_string()); - let encoded = result.write_to_bytes()?; - debug!("Responding to client with msg len: {}", encoded.len()); - s.send(&encoded, 0)?; + let _ = sender.send(result); return Err(BynarError::new(format!("safe to remove error: {}", e))); } }; - let encoded = result.write_to_bytes()?; - debug!("Responding to client with msg len: {}", encoded.len()); - s.send(&encoded, 0)?; + let _ = sender.send(result); Ok(()) } -pub fn get_jira_tickets(s: &Socket, config_dir: &Path) -> BynarResult<()> { +pub fn get_jira_tickets( + sender: &crossbeam_channel::Sender, + config_dir: &Path, +) -> BynarResult<()> { let mut result = OpJiraTicketsResult::new(); let config: ConfigSettings = match helpers::load_config(&config_dir, "bynar.json") { Ok(p) => p, @@ -594,7 +643,7 @@ pub fn get_jira_tickets(s: &Socket, config_dir: &Path) -> BynarResult<()> { result.set_error_msg(e.to_string()); // unable to load config file - let _ = respond_to_client(&result, s); + let _ = sender.send(result); return Ok(()); } }; @@ -607,7 +656,7 @@ pub fn get_jira_tickets(s: &Socket, config_dir: &Path) -> BynarResult<()> { result.set_error_msg(e.to_string()); // unable to create DB connection - let _ = respond_to_client(&result, s); + let _ = sender.send(result); return Ok(()); } }; @@ -627,7 +676,7 @@ pub fn get_jira_tickets(s: &Socket, config_dir: &Path) -> BynarResult<()> { }) .collect(); 
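    // pack the converted tickets into the protobuf result and hand it to the sender channel; the zmq loop in listen() writes it back to the client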
result.set_tickets(RepeatedField::from_vec(proto_jira)); - let _ = respond_to_client(&result, s); + let _ = sender.send(result); Ok(()) } From 72f34565c062ae95f851864cf0784ce691eae8d7 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 29 Jan 2020 13:18:26 -0500 Subject: [PATCH 10/76] Swapped DEALER with STREAM in disk-manager --- src/disk_manager.rs | 332 ++++++++++++++++++++++++-------------------- 1 file changed, 180 insertions(+), 152 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 0379c59..888220f 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -189,7 +189,7 @@ fn listen( ) -> BynarResult<()> { debug!("Starting zmq listener with version({:?})", zmq::version()); let context = zmq::Context::new(); - let responder = context.socket(zmq::DEALER)?; + let responder = context.socket(zmq::STREAM)?; debug!("Listening on tcp://{}:5555", listen_address); // Fail to start if this fails @@ -199,12 +199,14 @@ fn listen( .is_ok()); debug!("Building thread pool"); + //Note, for now we are using 16 threads by default let pool = rayon::ThreadPoolBuilder::new().num_threads(16).build()?; let responder = Arc::new(Mutex::new(responder)); // channel to send results from backend to main thread - let (send_res, recv_res) = crossbeam_channel::unbounded::(); - let (send_disk, recv_disk) = crossbeam_channel::unbounded::(); - let (send_ticket, recv_ticket) = crossbeam_channel::unbounded::(); + let (send_res, recv_res) = crossbeam_channel::unbounded::<(Vec, OpOutcomeResult)>(); + let (send_disk, recv_disk) = crossbeam_channel::unbounded::<(Vec, Disks)>(); + let (send_ticket, recv_ticket) = + crossbeam_channel::unbounded::<(Vec, OpJiraTicketsResult)>(); pool.scope(|s| 'outer: loop { match responder.try_lock() { Ok(responder) => { @@ -217,175 +219,194 @@ fn listen( Err(e) => return Err(BynarError::from(e)), Ok(e) => e as zmq::PollEvents, }; - let send_res = send_res.clone(); - let send_disk = send_disk.clone(); - let send_ticket = send_ticket.clone(); // is the socket readable? if (events & zmq::POLLIN) != 0 { - let msg = responder.recv_bytes(0)?; - debug!("Got msg len: {}", msg.len()); - trace!("Parsing msg {:?} as hex", msg); - let operation = match parse_from_bytes::(&msg) { - Ok(bytes) => bytes, - Err(e) => { - error!("Failed to parse_from_bytes {:?}. Ignoring request", e); - continue; - } - }; - - debug!("Operation requested: {:?}", operation.get_Op_type()); - if op_no_disk(&responder, &operation) { - continue; - } - match operation.get_Op_type() { - Op::Add => { - let id = if operation.has_osd_id() { - Some(operation.get_osd_id()) - } else { - None + //get the id first {STREAM sockets get messages with id prepended} + let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly + trace!("Client ID {:?}", client_id); + // get actual message + while responder.get_rcvmore()? { + let mut msg = responder.recv_bytes(0)?; + debug!("Got msg len: {}", msg.len()); + trace!("Parsing msg {:?} as hex", msg); + while msg.len() > 0 { + let operation = match parse_from_bytes::(&msg) { + Ok(bytes) => bytes, + Err(e) => { + error!("Failed to parse_from_bytes {:?}. 
Ignoring request", e); + continue; + } }; - s.spawn(move |_| { - let disk = operation.get_disk(); - match add_disk( - &send_res, - disk, - &backend_type, - id.clone(), - config_dir, - ) { - Ok(_) => { - info!("Add disk finished"); - } - Err(e) => { - error!("Add disk error: {:?}", e); - } + let client_id = client_id.clone(); + msg.drain(0..operation.write_to_bytes()?.len()); + let send_res = send_res.clone(); + let send_disk = send_disk.clone(); + let send_ticket = send_ticket.clone(); + + debug!("Operation requested: {:?}", operation.get_Op_type()); + if op_no_disk(&responder, &operation) { + continue; + } + match operation.get_Op_type() { + Op::Add => { + let id = if operation.has_osd_id() { + Some(operation.get_osd_id()) + } else { + None + }; + s.spawn(move |_| { + let disk = operation.get_disk(); + match add_disk( + &send_res, + disk, + &backend_type, + id.clone(), + config_dir, + client_id, + ) { + Ok(_) => { + info!("Add disk finished"); + } + Err(e) => { + error!("Add disk error: {:?}", e); + } + } + }); } - }); - } - Op::AddPartition => { - // - } - Op::List => { - s.spawn(move |_| { - match list_disks(&send_disk) { - Ok(_) => { - info!("List disks finished"); - } - Err(e) => { - error!("List disks error: {:?}", e); - } - }; - }); - } - Op::Remove => { - let mut result = OpOutcomeResult::new(); - result.set_disk(operation.get_disk().to_string()); - result.set_op_type(Op::Remove); - - s.spawn(move |_| { - match safe_to_remove( - &Path::new(operation.get_disk()), - &backend_type, - config_dir, - ) { - Ok((OpOutcome::Success, true)) => { - match remove_disk( + Op::AddPartition => { + // + } + Op::List => { + s.spawn(move |_| { + match list_disks(&send_disk, client_id) { + Ok(_) => { + info!("List disks finished"); + } + Err(e) => { + error!("List disks error: {:?}", e); + } + }; + }); + } + Op::Remove => { + let mut result = OpOutcomeResult::new(); + result.set_disk(operation.get_disk().to_string()); + result.set_op_type(Op::Remove); + + s.spawn(move |_| { + match safe_to_remove( + &Path::new(operation.get_disk()), + &backend_type, + config_dir, + ) { + Ok((OpOutcome::Success, true)) => { + match remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + config_dir, + client_id + ) { + Ok(_) => { + info!("Remove disk finished"); + } + Err(e) => { + error!("Remove disk error: {:?}", e); + } + }; + } + Ok((OpOutcome::Skipped, val)) => { + debug!("Disk skipped"); + result.set_outcome(OpOutcome::Skipped); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = send_res.send((client_id, result)); + } + Ok((OpOutcome::SkipRepeat, val)) => { + debug!("Disk skipped, safe to remove already ran"); + result.set_outcome(OpOutcome::SkipRepeat); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = send_res.send((client_id, result)); + } + Ok((_, false)) => { + debug!("Disk is not safe to remove"); + //Response to client + result.set_value(false); + result.set_outcome(OpOutcome::Success); + result.set_result(ResultType::ERR); + result.set_error_msg( + "Not safe to remove disk".to_string(), + ); + let _ = send_res.send((client_id, result)); + } + Err(e) => { + error!("safe to remove failed: {:?}", e); + // Response to client + result.set_value(false); + result.set_result(ResultType::ERR); + result.set_error_msg(e.to_string()); + let _ = send_res.send((client_id, result)); + } + }; + }); + } + Op::SafeToRemove => { + s.spawn(move |_| { + match safe_to_remove_disk( &send_res, operation.get_disk(), &backend_type, config_dir, + client_id, ) { Ok(_) => { - 
info!("Remove disk finished"); + info!("Safe to remove disk finished"); } Err(e) => { - error!("Remove disk error: {:?}", e); + error!("Safe to remove error: {:?}", e); } }; - } - Ok((OpOutcome::Skipped, val)) => { - debug!("Disk skipped"); - result.set_outcome(OpOutcome::Skipped); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = send_res.send(result); - } - Ok((OpOutcome::SkipRepeat, val)) => { - debug!("Disk skipped, safe to remove already ran"); - result.set_outcome(OpOutcome::SkipRepeat); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = send_res.send(result); - } - Ok((_, false)) => { - debug!("Disk is not safe to remove"); - //Response to client - result.set_value(false); - result.set_outcome(OpOutcome::Success); - result.set_result(ResultType::ERR); - result.set_error_msg("Not safe to remove disk".to_string()); - let _ = send_res.send(result); - } - Err(e) => { - error!("safe to remove failed: {:?}", e); - // Response to client - result.set_value(false); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - let _ = send_res.send(result); - } - }; - }); - } - Op::SafeToRemove => { - s.spawn(move |_| { - match safe_to_remove_disk( - &send_res, - operation.get_disk(), - &backend_type, - config_dir, - ) { - Ok(_) => { - info!("Safe to remove disk finished"); - } - Err(e) => { - error!("Safe to remove error: {:?}", e); - } - }; - }); - } - Op::GetCreatedTickets => { - match get_jira_tickets(&send_ticket, config_dir) { - Ok(_) => { - info!("Fetching jira tickets finished"); + }); } - Err(e) => { - error!("Fetching jira error: {:?}", e); + Op::GetCreatedTickets => { + match get_jira_tickets(&send_ticket, config_dir, client_id) { + Ok(_) => { + info!("Fetching jira tickets finished"); + } + Err(e) => { + error!("Fetching jira error: {:?}", e); + } + }; } }; } - }; + } } // send completed requests (or error messages) if (events & zmq::POLLOUT) != 0 { //check disks first, since those are faster requests than add/remove reqs match recv_disk.try_recv() { - Ok(result) => { + Ok((client_id, result)) => { // send result back to client + //send client id back first + let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } Err(_) => { // check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) match recv_ticket.try_recv() { - Ok(result) => { + Ok((client_id, result)) => { // send result back to client + let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } Err(_) => { // no disks in the queue, check if any add/remove/safe-to-remove req results match recv_res.try_recv() { - Ok(result) => { + Ok((client_id, result)) => { // send result back to client + let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } Err(_) => {} //do nothing @@ -450,11 +471,12 @@ fn respond_to_client(result: &T, s: &Socket) -> BynarResul // add disk request function. Send the result through the sender channel back to the main thread. fn add_disk( - sender: &crossbeam_channel::Sender, + sender: &crossbeam_channel::Sender<(Vec, OpOutcomeResult)>, d: &str, backend: &BackendType, id: Option, config_dir: &Path, + client_id: Vec, ) -> BynarResult<()> { let mut result = OpOutcomeResult::new(); result.set_disk(d.to_string()); @@ -466,7 +488,7 @@ fn add_disk( result.set_error_msg(e.to_string()); // Bail early. 
We can't load the backend - let _ = sender.send(result); + let _ = sender.send((client_id, result)); return Ok(()); } }; @@ -482,7 +504,7 @@ fn add_disk( result.set_error_msg(e.to_string()); } }; - let _ = sender.send(result); + let _ = sender.send((client_id, result)); Ok(()) } @@ -541,7 +563,10 @@ fn get_partition_info(dev_path: &Path) -> BynarResult { Ok(partition_info) } -fn list_disks(c: &crossbeam_channel::Sender) -> BynarResult<()> { +fn list_disks( + c: &crossbeam_channel::Sender<(Vec, Disks)>, + client_id: Vec, +) -> BynarResult<()> { let disk_list: Vec = get_disks()?; let mut disks = Disks::new(); @@ -551,15 +576,16 @@ fn list_disks(c: &crossbeam_channel::Sender) -> BynarResult<()> { debug!("Responding to client with msg len: {}", encoded.len()); s.send(&encoded, 0)?;*/ - let _ = c.send(disks); + let _ = c.send((client_id, disks)); Ok(()) } fn remove_disk( - sender: &crossbeam_channel::Sender, + sender: &crossbeam_channel::Sender<(Vec, OpOutcomeResult)>, d: &str, backend: &BackendType, config_dir: &Path, + client_id: Vec, ) -> BynarResult<()> { //Returns OpOutcomeResult let mut result = OpOutcomeResult::new(); @@ -572,7 +598,7 @@ fn remove_disk( result.set_error_msg(e.to_string()); // Bail early. We can't load the backend - let _ = sender.send(result); + let _ = sender.send((client_id, result)); return Ok(()); } }; @@ -586,7 +612,7 @@ fn remove_disk( result.set_error_msg(e.to_string()); } }; - let _ = sender.send(result); + let _ = sender.send((client_id, result)); Ok(()) } @@ -602,10 +628,11 @@ fn safe_to_remove( } fn safe_to_remove_disk( - sender: &crossbeam_channel::Sender, + sender: &crossbeam_channel::Sender<(Vec, OpOutcomeResult)>, d: &str, backend: &BackendType, config_dir: &Path, + client_id: Vec, ) -> BynarResult<()> { debug!("Checking if {} is safe to remove", d); let mut result = OpOutcomeResult::new(); @@ -622,17 +649,18 @@ fn safe_to_remove_disk( debug!("Safe to remove err: {}", e); result.set_result(ResultType::ERR); result.set_error_msg(e.to_string()); - let _ = sender.send(result); + let _ = sender.send((client_id, result)); return Err(BynarError::new(format!("safe to remove error: {}", e))); } }; - let _ = sender.send(result); + let _ = sender.send((client_id, result)); Ok(()) } pub fn get_jira_tickets( - sender: &crossbeam_channel::Sender, + sender: &crossbeam_channel::Sender<(Vec, OpJiraTicketsResult)>, config_dir: &Path, + client_id: Vec, ) -> BynarResult<()> { let mut result = OpJiraTicketsResult::new(); let config: ConfigSettings = match helpers::load_config(&config_dir, "bynar.json") { @@ -643,7 +671,7 @@ pub fn get_jira_tickets( result.set_error_msg(e.to_string()); // unable to load config file - let _ = sender.send(result); + let _ = sender.send((client_id, result)); return Ok(()); } }; @@ -656,7 +684,7 @@ pub fn get_jira_tickets( result.set_error_msg(e.to_string()); // unable to create DB connection - let _ = sender.send(result); + let _ = sender.send((client_id, result)); return Ok(()); } }; @@ -676,7 +704,7 @@ pub fn get_jira_tickets( }) .collect(); result.set_tickets(RepeatedField::from_vec(proto_jira)); - let _ = sender.send(result); + let _ = sender.send((client_id, result)); Ok(()) } From 9d4b09d51cf37ee750fa43e018d9fe8ca9c129be Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 30 Jan 2020 10:55:29 -0500 Subject: [PATCH 11/76] Added handling of sending messages from Bynar to disk-manager --- src/disk_manager.rs | 2 +- src/lib/lib.rs | 71 +++++++++++++++++++------- src/main.rs | 118 ++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 168 
insertions(+), 23 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 888220f..cf2b076 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -305,7 +305,7 @@ fn listen( operation.get_disk(), &backend_type, config_dir, - client_id + client_id, ) { Ok(_) => { info!("Remove disk finished"); diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 3c62b01..8182f7a 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -55,14 +55,30 @@ pub fn get_vault_token(endpoint: &str, token: &str, hostname: &str) -> BynarResu Ok(res) } +/// send an operation request to the disk-manager +pub fn request(s: &Socket, op: Operation, client_id: Vec) -> BynarResult<()> { + //send the id first + s.send(client_id, zmq::SNDMORE)?; + let encoded = op.write_to_bytes().unwrap(); + debug!("Sending message"); + s.send(&encoded, 0)?; + Ok(()) +} + +/// send an add_disk request to the disk-manager pub fn add_disk_request( s: &Socket, path: &Path, id: Option, + client_id: Vec, simulate: bool, -) -> BynarResult { +) -> BynarResult<()> { + // { let mut o = Operation::new(); debug!("Creating add disk operation request"); + //send the id first + s.send(client_id, zmq::SNDMORE)?; + o.set_Op_type(Op::Add); o.set_disk(format!("{}", path.display())); o.set_simulate(simulate); @@ -73,8 +89,8 @@ pub fn add_disk_request( let encoded = o.write_to_bytes().unwrap(); debug!("Sending message"); s.send(&encoded, 0)?; - - debug!("Waiting for response"); + Ok(()) + /*debug!("Waiting for response"); let add_response = s.recv_bytes(0)?; debug!("Decoding msg len: {}", add_response.len()); let op_result = parse_from_bytes::(&add_response)?; @@ -90,7 +106,7 @@ pub fn add_disk_request( Err(BynarError::from("Add disk failed but error_msg not set")) } } - } + }*/ } /* @@ -114,9 +130,13 @@ pub fn check_disk_request(s: &mut Socket) -> Result { } */ -pub fn list_disks_request(s: &Socket) -> BynarResult> { +/// send a list disk request to the disk-manager +pub fn list_disks_request(s: &Socket, client_id: Vec) -> BynarResult<()> { + //BynarResult> { let mut o = Operation::new(); debug!("Creating list operation request"); + //send the id first + s.send(client_id, zmq::SNDMORE)?; o.set_Op_type(Op::List); debug!("Encoding as hex"); @@ -125,8 +145,8 @@ pub fn list_disks_request(s: &Socket) -> BynarResult> { debug!("Sending message"); s.send(&encoded, 0)?; - - debug!("Waiting for response"); + Ok(()) + /*debug!("Waiting for response"); let disks_response = s.recv_bytes(0)?; debug!("Decoding msg len: {}", disks_response.len()); let disk_list = parse_from_bytes::(&disks_response)?; @@ -136,36 +156,45 @@ pub fn list_disks_request(s: &Socket) -> BynarResult> { d.push(disk.clone()); } - Ok(d) + Ok(d)*/ } -pub fn safe_to_remove_request(s: &Socket, path: &Path) -> BynarResult<(OpOutcome, bool)> { +/// send safe-to-remove request to disk-manager +pub fn safe_to_remove_request(s: &Socket, path: &Path, client_id: Vec) -> BynarResult<()> { + //<(OpOutcome, bool)> { let mut o = Operation::new(); + //send the id first + s.send(client_id, zmq::SNDMORE)?; debug!("Creating safe to remove operation request"); o.set_Op_type(Op::SafeToRemove); o.set_disk(format!("{}", path.display())); let encoded = o.write_to_bytes()?; debug!("Sending message"); s.send(&encoded, 0)?; - - debug!("Waiting for response"); + Ok(()) + /*debug!("Waiting for response"); let safe_response = s.recv_bytes(0)?; debug!("Decoding msg len: {}", safe_response.len()); let op_result = parse_from_bytes::(&safe_response)?; match op_result.get_result() { ResultType::OK => 
Ok((op_result.get_outcome(), op_result.get_value())), ResultType::ERR => Err(BynarError::from(op_result.get_error_msg())), - } + }*/ } +/// Send a remove disk request to the disk_manager pub fn remove_disk_request( s: &Socket, path: &Path, id: Option, + client_id: Vec, simulate: bool, -) -> BynarResult { +) -> BynarResult<()> { + //BynarResult { let mut o = Operation::new(); debug!("Creating remove operation request"); + //send the id first + s.send(client_id, zmq::SNDMORE)?; o.set_Op_type(Op::Remove); o.set_disk(format!("{}", path.display())); o.set_simulate(simulate); @@ -176,8 +205,8 @@ pub fn remove_disk_request( let encoded = o.write_to_bytes()?; debug!("Sending message"); s.send(&encoded, 0)?; - - debug!("Waiting for response"); + Ok(()) + /*debug!("Waiting for response"); let remove_response = s.recv_bytes(0)?; debug!("Decoding msg len: {}", remove_response.len()); let op_result = match parse_from_bytes::(&remove_response) { @@ -199,7 +228,7 @@ pub fn remove_disk_request( Err(BynarError::from("Remove disk failed but error_msg not set")) } } - } + }*/ } // default filename for daemon_output @@ -261,15 +290,19 @@ pub struct DBConfig { pub dbname: String, } -pub fn get_jira_tickets(s: &Socket) -> BynarResult<()> { +/// get the list of JIRA tickets from disk-manager +pub fn get_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { let mut o = Operation::new(); + //send the id first + s.send(client_id, zmq::SNDMORE)?; debug!("calling get_jira_tickets "); o.set_Op_type(Op::GetCreatedTickets); let encoded = o.write_to_bytes()?; debug!("Sending message in get_jira_tickets"); s.send(&encoded, 0)?; + Ok(()) - debug!("Waiting for response: get_jira_tickets"); + /*debug!("Waiting for response: get_jira_tickets"); let tickets_response = s.recv_bytes(0)?; debug!("Decoding msg len: {}", tickets_response.len()); @@ -297,5 +330,5 @@ pub fn get_jira_tickets(s: &Socket) -> BynarResult<()> { )) } } - } + }*/ } diff --git a/src/main.rs b/src/main.rs index 516c495..6afc1cc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -27,7 +27,7 @@ mod util; use crate::create_support_ticket::{create_support_ticket, ticket_resolved}; use crate::in_progress::*; use crate::test_disk::{State, StateMachine}; -use api::service::{Op, OpOutcome}; +use api::service::{Op, OpOutcome, OpOutcomeResult}; use clap::{crate_authors, crate_version, App, Arg}; use daemonize::Daemonize; use helpers::{error::*, host_information::Host, ConfigSettings}; @@ -39,7 +39,9 @@ use signal_hook::iterator::Signals; use signal_hook::*; use simplelog::{CombinedLogger, Config, SharedLogger, TermLogger, WriteLogger}; use slack_hook::{PayloadBuilder, Slack}; + use std::collections::HashMap; +use std::collections::VecDeque; use std::fs::{create_dir, read_to_string, File, OpenOptions}; use std::path::{Path, PathBuf}; use std::process; @@ -49,8 +51,21 @@ use std::time::{Duration, Instant}; // a specific operation and its outcome #[derive(Debug, Clone)] struct DiskOp { - op_type: Op, // operation type - ret_val: Option, //None if outcome not yet determined + pub op_type: Op, // operation type + pub description: Option, // the description for a JIRA ticket if necessary (None if not Safe-to-remove/Remove-disk) + pub operaton_id: Option, // the operation id if one exists (for safe-to-remove, remove request handling) + pub ret_val: Option, //None if outcome not yet determined +} + +impl DiskOp { + pub fn new(op: Operation, description: Option, operation_id: Option) -> DiskOp { + DiskOp { + op_type: op.get_Op_type(), + description, + operation_id, + 
ret_val: None, + } + } } // create a message map to handle list of disk-manager requests @@ -624,6 +639,103 @@ fn add_repaired_disks( Ok(()) } +// send a requst and update the message map +fn send_and_update( + s: &Socket, + message_map: &mut HashMap>>, + client_id: Vec, + (mess, desc, op_id): (Operation, Option, Option), + path: &PathBuf, +) -> BynarResult<()> { + trace!("Send request {:?}", mess); + request(s, mess, client_id)?; + //add or update to message_map if path != emptyyyy + if mess.get_disk() != "" { + trace!("add operation to map"); + //check optype, make op + let disk_op = DiskOp::new(mess, desc, op_id); + add_or_update_map_op(message_map, &path, disk_op)?; + } + Ok(()) +} + +// check if the socket is readable/writable and send/recieve message if possible +fn send_and_recieve( + s: &Socket, + message_map: &mut HashMap>>, + message_queue: &mut VecDeque<(Operation, Option, Option)>, + client_id: Vec, +) -> BynarResult<()> { + // Note, all client sent messages are Operation, while return values can be OpJiraTicketResult, Disks, or OpOutcomeResult + let events = match s.get_events() { + Err(zmq::Error::EBUSY) => { + debug!("Socket Busy, skip"); + return Ok(()); + } + Err(e) => { + error!("Get Client Socket Events errored...{:?}", e); + return Err(BynarError::from(e)); + } + Ok(e) => e, + }; + //check sendable first + if (events & zmq::POLLOUT) != 0 { + //dequeue from message_queue if it isn't empty + if let Some((mess, desc, op_id)) = message_queue.pop_front() { + // if mess.op_type() == Op::Remove, check if Safe-To-Remove in map complete + // if not, send to end of queue (push_back) + let path = Path::new(mess.get_disk()).to_path_buf(); + //check if there was a previous request, and whether it was completed + if let Some(disk_op) = get_map_op(&message_map, &path.to_path_buf()) { + // check if Safe-to-remove returned yet + if let Some(val) = disk_op.ret_val { + // check if mess is a Remove op + if mess.op_type() == Op::Remove { + // check success outcome + if val.get_outcome() == OpOutcome::Success && val.get_value() { + //then ok to run Op::Remove + send_and_update( + s, + &mut message_map, + client_id, + (mess, desc, op_id), + &path, + )?; + } + // safe-to-remove returned false or error'd so we should not remove but let manual handling + // delete the remove request in this case (in otherwords, do nothing) + } else { + // not remove request, since previous request is complete, run next request + // this technically shouldn't happen though, so print an error! + error!( + "Previous request {:?} has finished, but hasn't been reset", + disk_op.op_type + ); + send_and_update( + s, + &mut message_map, + client_id, + (mess, desc, op_id), + &path, + )?; + } + } else { + // we haven't gotten response from previous request yet, push request to back of queue + message_queue.push_back((mess, desc, op_id)); + } + } else { + // safe to run the op. In the case of Remove op, it shouldn't be possible to NOT + // have a safe-to-remove run before (it's always safe-to-remove then remove) + // however since the remove operation will run safe-to-remove anyways, it's fine to just run + send_and_update(s, &mut message_map, client_id, (mess, desc, op_id), &path)?; + } + } + } + // can get response + if (events & zmq::POLLIN != 0) {} + Ok(()) +} + // 1. Gather a list of all the disks // 2. Check every disk // 3. 
Decide if a disk needs to be replaced From 6fd3fb8ab3071b470b6171bd2d4bb1721d7f1f99 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 30 Jan 2020 15:16:02 -0500 Subject: [PATCH 12/76] Added macros, handling add_disk --- src/lib/lib.rs | 42 ++++++++++++++++++ src/main.rs | 117 ++++++++++++++++++++++++++++++++++++++++++++++--- src/util.rs | 8 ++++ 3 files changed, 160 insertions(+), 7 deletions(-) diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 8182f7a..6692f0a 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -290,6 +290,48 @@ pub struct DBConfig { pub dbname: String, } +/// get message(s) from the socket +pub fn get_messages(s: Socket) -> BynarResult> { + let msg = s.recv_bytes(0)?; + let id = msg.clone(); + if s.get_rcvmore() { + return s.recv_bytes(0)?; + } +} + +#[macro_export] +/// Create a new Operation +macro_rules! make_op { + ($op_type: ident) => { + let mut o = Operation::new(); + o.set_Op_type(Op::$op_type); + o + }; + ($op_type:ident, $disk_path:expr) => { + let mut o = Operation::new(); + o.set_Op_type(Op::$op_type); + o.set_disk($disk_path); + o + }; + ($op_type:ident, $disk_path:expr, $simulate:expr) => { + let mut o = Operation::new(); + o.set_Op_type(Op::$op_type); + o.set_disk($disk_path); + o.set_simulate(simulate); + o + }; + ($op_type:ident, $disk_path:expr, $simulate:expr, $id:expr) => { + let mut o = Operation::new(); + o.set_Op_type(Op::$op_type); + o.set_disk($disk_path); + o.set_simulate(simulate); + if let Some(osd_id) = $id { + o.set_osd_id(osd_id); + } + o + }; +} + /// get the list of JIRA tickets from disk-manager pub fn get_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { let mut o = Operation::new(); diff --git a/src/main.rs b/src/main.rs index 6afc1cc..0037fe3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -27,7 +27,7 @@ mod util; use crate::create_support_ticket::{create_support_ticket, ticket_resolved}; use crate::in_progress::*; use crate::test_disk::{State, StateMachine}; -use api::service::{Op, OpOutcome, OpOutcomeResult}; +use api::service::{Op, OpJiraTicketsResult, OpOutcome, OpOutcomeResult}; use clap::{crate_authors, crate_version, App, Arg}; use daemonize::Daemonize; use helpers::{error::*, host_information::Host, ConfigSettings}; @@ -51,8 +51,10 @@ use std::time::{Duration, Instant}; // a specific operation and its outcome #[derive(Debug, Clone)] struct DiskOp { - pub op_type: Op, // operation type - pub description: Option, // the description for a JIRA ticket if necessary (None if not Safe-to-remove/Remove-disk) + pub op_type: Op, // operation type + // the description for a JIRA ticket if necessary (None if not Safe-to-remove/Remove-disk) + // Or, if an add_disk request, description is the ticket_id + pub description: Option, pub operaton_id: Option, // the operation id if one exists (for safe-to-remove, remove request handling) pub ret_val: Option, //None if outcome not yet determined } @@ -573,14 +575,17 @@ fn check_for_failed_hardware( Ok(()) } +// Actually, this function now checks the outstanding tickets, and if any of them are resolved, adds +// an add_disk request to the message_queue fn add_repaired_disks( - config: &ConfigSettings, - host_info: &Host, + //config: &ConfigSettings, + //host_info: &Host, + message_queue: &mut VecDeque<(Operation, Option, Option)>, pool: &Pool, storage_detail_id: u32, simulate: bool, ) -> BynarResult<()> { - let public_key = get_public_key(&config, &host_info)?; + //let public_key = get_public_key(&config, &host_info)?; info!("Getting outstanding repair tickets"); let tickets = 
in_progress::get_outstanding_repair_tickets(&pool, storage_detail_id)?; @@ -589,7 +594,21 @@ fn add_repaired_disks( for ticket in tickets { match ticket_resolved(config, &ticket.ticket_id.to_string()) { Ok(true) => { + debug!("Creating add disk operation request"); + let o = make_op!( + Add, + format!("{}", Path::new(&ticket.device_path).display()), + simulate + ); + /*let mut o = Operation::new(); + o.set_Op_type(Op::Add); + o.set_disk(format!("{}", Path::new(&ticket.device_path).display())); + o.set_simulate(simulate);*/ + ticket_id = Some(ticket.ticket_id.to_string()); + message_queue.push_back((o, ticket_id, None)); //CALL RPC + // add add_disk request to message_queue + /* debug!("Connecting to disk-manager"); let socket = helpers::connect( &config.manager_host, @@ -626,6 +645,7 @@ fn add_repaired_disks( error!("Failed to add disk: {:?}", e); } }; + */ } Ok(false) => {} Err(e) => { @@ -659,6 +679,71 @@ fn send_and_update( Ok(()) } +// handle the return value from an add_disk request +fn handle_add_disk_res(pool: &Pool, outcome: OpOutcome, ticket_id: String) { + match outcome { + OpOutcome::Success => debug!("Disk added successfully. Updating database record"), + // Disk was either boot or something that shouldn't be added via backend + OpOutcome::Skipped => debug!("Disk Skipped. Updating database record"), + // Disk is already in the cluster + OpOutcome::SkipRepeat => debug!("Disk already added. Skipping. Updating database record"), + } + match in_progress::resolve_ticket_in_db(pool, &ticket.ticket_id) { + Ok(_) => debug!("Database updated"), + Err(e) => error!("Failed to resolve ticket {}. {:?}", ticket.ticket_id, e), + }; +} + +//handle return of Operation +fn handle_operation_result( + message_map: &mut HashMap>>, + pool: &Pool, + op_res: OpOutcomeResult, +) -> BynarResult<()> { + match op_result.get_result() { + ResultType::OK => {} + ResultType::Err => { + if op_res.has_error_msg() { + let msg = op_res.get_error_msg(); + match op_res.get_op_type() { + Op::Add => { + error!("Add disk failed : {}", msg); + return Err(BynarError::from(msg)); + } + Op::Remove => { + error!("Remove disk failed : {}", msg); + return Err(BynarError::from(msg)); + } + Op::SafeToRemove => { + error!("SafeToRemove disk failed : {}", msg); + return Err(BynarError::from(msg)); + } + _ => {} + } + } + } + } + + match op_res.get_op_type() { + Op::Add => { + let path = Path::new(&op_res.get_disk()); + if let Some(disk_op) = get_map_op(message_map, path.to_path_buf())? 
{ + if let Some(ticket_id) = disk_op.description { + handle_add_disk_res(pool, op_res.get_outcome(), ticket_id: String) + } + } + error!( + "Unable to get current operation in the map for {}", + path.display() + ); + return Err(BynarError::from(format!( + "Unable to get current operation in the map for {}", + path.display() + ))); + } + } +} + // check if the socket is readable/writable and send/recieve message if possible fn send_and_recieve( s: &Socket, @@ -732,7 +817,25 @@ fn send_and_recieve( } } // can get response - if (events & zmq::POLLIN != 0) {} + if (events & zmq::POLLIN != 0) { + // get the message, it should be either a OpOutcomeResult, or OpJiraTicketsResult + // NOTE: disks is not an option since list_disks is not a request that the main bynar program makes + let mut message = get_messages(s)?; + // skip empty initial message, and keep looping until no more messages from disk-manager + while message.len() > 0 { + // get message + match get_message!(OpOutcomeResult, &message) { + Ok(outcome) => { + message.drain(0..outcome.write_to_bytes()?.len()); + } + Err(_) => { + // must be tickets, since list_disks is never requested by bynar main program + let tickets = get_message!(OpJiraTicketsResult, &message)?; + message.drain(0..ticket.write_to_bytes()?.len()); + } + } + } + } Ok(()) } diff --git a/src/util.rs b/src/util.rs index 58bd1a5..9561863 100644 --- a/src/util.rs +++ b/src/util.rs @@ -12,3 +12,11 @@ macro_rules! evaluate { results }; } + +// parse object of type type_name from vec mess +#[macro_export] +macro_rules! get_message { + ($type_name:ty, $mess:expr) => { + parse_from_bytes::($mess) + }; +} From 47675499453471223875cd782d5cc7140e8893d4 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 30 Jan 2020 15:52:35 -0500 Subject: [PATCH 13/76] Fix a little client stuff --- src/client.rs | 46 +++++++++++++++++++++++++++++++++++++++++----- src/lib/lib.rs | 37 +++++++++++++++++++------------------ src/main.rs | 10 +++++----- src/util.rs | 4 +++- 4 files changed, 68 insertions(+), 29 deletions(-) diff --git a/src/client.rs b/src/client.rs index 7c94c39..fc23eed 100644 --- a/src/client.rs +++ b/src/client.rs @@ -5,20 +5,56 @@ use std::path::Path; use std::str::FromStr; //use disk_manager::disk_manager; -use api::service::{Disk, OpOutcome}; +use api::service::{Disk, OpOutcome, ResultType, OpOutcomeResult}; use clap::{crate_authors, crate_version, App, Arg, ArgMatches, SubCommand}; -use helpers::error::BynarResult; +use helpers::error::{BynarResult, BynarError}; use hostname::get_hostname; -use log::{error, info, trace}; +use log::{error, info, trace, debug}; use simplelog::{CombinedLogger, Config, TermLogger, WriteLogger}; use zmq::Socket; + +#[macro_use] +mod util; /* CLI client to call functions over RPC */ fn add_disk(s: &Socket, path: &Path, id: Option, simulate: bool) -> BynarResult { - let outcome = helpers::add_disk_request(s, path, id, simulate)?; - Ok(outcome) + let client_id = s.get_identity().unwrap(); + helpers::add_disk_request(s, path, id, client_id, simulate)?; + //loop until socket is readable, then get the response + loop { + let events = match s.get_events() { + Err(zmq::Error::EBUSY) => { + debug!("Socket Busy, skip"); + continue; + } + Err(e) => { + error!("Get Client Socket Events errored...{:?}", e); + return Err(BynarError::from(e)); + } + Ok(e) => e, + }; + // got response + if (events & zmq::POLLIN != 0) { + let mut message = helpers::get_messages(s)?; + let op_result = get_message!(OpOutcomeResult, &message)?; + match op_result.get_result() { + 
ResultType::OK => return Ok(op_result.get_outcome()), + ResultType::ERR => { + if op_result.has_error_msg() { + let msg = op_result.get_error_msg(); + error!("Add disk failed: {}", msg); + return Err(BynarError::from(op_result.get_error_msg())); + } else { + error!("Add disk failed but error_msg not set"); + return Err(BynarError::from("Add disk failed but error_msg not set")); + } + } + } + } + } + return Err(BynarError::from(format!("Failed Add_disk loop"))); } fn list_disks(s: &Socket) -> BynarResult> { diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 6692f0a..4e6ab16 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -58,7 +58,7 @@ pub fn get_vault_token(endpoint: &str, token: &str, hostname: &str) -> BynarResu /// send an operation request to the disk-manager pub fn request(s: &Socket, op: Operation, client_id: Vec) -> BynarResult<()> { //send the id first - s.send(client_id, zmq::SNDMORE)?; + s.send(&client_id, zmq::SNDMORE)?; let encoded = op.write_to_bytes().unwrap(); debug!("Sending message"); s.send(&encoded, 0)?; @@ -77,7 +77,7 @@ pub fn add_disk_request( let mut o = Operation::new(); debug!("Creating add disk operation request"); //send the id first - s.send(client_id, zmq::SNDMORE)?; + s.send(&client_id, zmq::SNDMORE)?; o.set_Op_type(Op::Add); o.set_disk(format!("{}", path.display())); @@ -136,7 +136,7 @@ pub fn list_disks_request(s: &Socket, client_id: Vec) -> BynarResult<()> { let mut o = Operation::new(); debug!("Creating list operation request"); //send the id first - s.send(client_id, zmq::SNDMORE)?; + s.send(&client_id, zmq::SNDMORE)?; o.set_Op_type(Op::List); debug!("Encoding as hex"); @@ -164,7 +164,7 @@ pub fn safe_to_remove_request(s: &Socket, path: &Path, client_id: Vec) -> By //<(OpOutcome, bool)> { let mut o = Operation::new(); //send the id first - s.send(client_id, zmq::SNDMORE)?; + s.send(&client_id, zmq::SNDMORE)?; debug!("Creating safe to remove operation request"); o.set_Op_type(Op::SafeToRemove); o.set_disk(format!("{}", path.display())); @@ -194,7 +194,7 @@ pub fn remove_disk_request( let mut o = Operation::new(); debug!("Creating remove operation request"); //send the id first - s.send(client_id, zmq::SNDMORE)?; + s.send(&client_id, zmq::SNDMORE)?; o.set_Op_type(Op::Remove); o.set_disk(format!("{}", path.display())); o.set_simulate(simulate); @@ -294,49 +294,50 @@ pub struct DBConfig { pub fn get_messages(s: Socket) -> BynarResult> { let msg = s.recv_bytes(0)?; let id = msg.clone(); - if s.get_rcvmore() { - return s.recv_bytes(0)?; + if s.get_rcvmore()? { + return Ok(s.recv_bytes(0)?); } + Ok(vec![]) } #[macro_export] /// Create a new Operation macro_rules! 
make_op { - ($op_type: ident) => { + ($op_type: ident) => {{ let mut o = Operation::new(); o.set_Op_type(Op::$op_type); o - }; - ($op_type:ident, $disk_path:expr) => { + }}; + ($op_type:ident, $disk_path:expr) => {{ let mut o = Operation::new(); o.set_Op_type(Op::$op_type); o.set_disk($disk_path); o - }; - ($op_type:ident, $disk_path:expr, $simulate:expr) => { + }}; + ($op_type:ident, $disk_path:expr, $simulate:expr) => {{ let mut o = Operation::new(); o.set_Op_type(Op::$op_type); o.set_disk($disk_path); - o.set_simulate(simulate); + o.set_simulate($simulate); o - }; - ($op_type:ident, $disk_path:expr, $simulate:expr, $id:expr) => { + }}; + ($op_type:ident, $disk_path:expr, $simulate:expr, $id:expr) => {{ let mut o = Operation::new(); o.set_Op_type(Op::$op_type); o.set_disk($disk_path); - o.set_simulate(simulate); + o.set_simulate($simulate); if let Some(osd_id) = $id { o.set_osd_id(osd_id); } o - }; + }}; } /// get the list of JIRA tickets from disk-manager pub fn get_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { let mut o = Operation::new(); //send the id first - s.send(client_id, zmq::SNDMORE)?; + s.send(&client_id, zmq::SNDMORE)?; debug!("calling get_jira_tickets "); o.set_Op_type(Op::GetCreatedTickets); let encoded = o.write_to_bytes()?; diff --git a/src/main.rs b/src/main.rs index 0037fe3..fe2c115 100644 --- a/src/main.rs +++ b/src/main.rs @@ -27,7 +27,7 @@ mod util; use crate::create_support_ticket::{create_support_ticket, ticket_resolved}; use crate::in_progress::*; use crate::test_disk::{State, StateMachine}; -use api::service::{Op, OpJiraTicketsResult, OpOutcome, OpOutcomeResult}; +use api::service::{Op, OpJiraTicketsResult, OpOutcome, OpOutcomeResult, Operation, ResultType}; use clap::{crate_authors, crate_version, App, Arg}; use daemonize::Daemonize; use helpers::{error::*, host_information::Host, ConfigSettings}; @@ -578,7 +578,7 @@ fn check_for_failed_hardware( // Actually, this function now checks the outstanding tickets, and if any of them are resolved, adds // an add_disk request to the message_queue fn add_repaired_disks( - //config: &ConfigSettings, + config: &ConfigSettings, //host_info: &Host, message_queue: &mut VecDeque<(Operation, Option, Option)>, pool: &Pool, @@ -595,7 +595,7 @@ fn add_repaired_disks( match ticket_resolved(config, &ticket.ticket_id.to_string()) { Ok(true) => { debug!("Creating add disk operation request"); - let o = make_op!( + let op = helpers::make_op!( Add, format!("{}", Path::new(&ticket.device_path).display()), simulate @@ -604,8 +604,8 @@ fn add_repaired_disks( o.set_Op_type(Op::Add); o.set_disk(format!("{}", Path::new(&ticket.device_path).display())); o.set_simulate(simulate);*/ - ticket_id = Some(ticket.ticket_id.to_string()); - message_queue.push_back((o, ticket_id, None)); + let tid = Some(ticket.ticket_id.to_string()); + message_queue.push_back((op, tid, None)); //CALL RPC // add add_disk request to message_queue /* diff --git a/src/util.rs b/src/util.rs index 9561863..cbfc59a 100644 --- a/src/util.rs +++ b/src/util.rs @@ -1,3 +1,5 @@ + + #[macro_export] macro_rules! evaluate { ($e: expr, $i: ident, $err: expr, $e_ident: ident) => { @@ -17,6 +19,6 @@ macro_rules! evaluate { #[macro_export] macro_rules! 
get_message { ($type_name:ty, $mess:expr) => { - parse_from_bytes::($mess) + protobuf::parse_from_bytes::<$type_name>($mess) }; } From c3a59d69cb15471a7ebc382c193a2aa576a81b48 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 31 Jan 2020 10:07:21 -0500 Subject: [PATCH 14/76] Some client fixing + macros --- src/client.rs | 28 +++++++++------------------- src/lib/lib.rs | 2 +- src/main.rs | 44 +++++++++++++++++++------------------------- src/util.rs | 20 ++++++++++++++++++-- 4 files changed, 47 insertions(+), 47 deletions(-) diff --git a/src/client.rs b/src/client.rs index fc23eed..96d6431 100644 --- a/src/client.rs +++ b/src/client.rs @@ -3,13 +3,14 @@ use std::fs::{read_to_string, File}; use std::path::Path; use std::str::FromStr; +use std::io::{Error, ErrorKind, Write}; //use disk_manager::disk_manager; -use api::service::{Disk, OpOutcome, ResultType, OpOutcomeResult}; +use api::service::{Disk, OpOutcome, OpOutcomeResult, ResultType}; use clap::{crate_authors, crate_version, App, Arg, ArgMatches, SubCommand}; -use helpers::error::{BynarResult, BynarError}; +use helpers::error::{BynarError, BynarResult}; use hostname::get_hostname; -use log::{error, info, trace, debug}; +use log::{debug, error, info, trace}; use simplelog::{CombinedLogger, Config, TermLogger, WriteLogger}; use zmq::Socket; @@ -20,23 +21,13 @@ mod util; */ fn add_disk(s: &Socket, path: &Path, id: Option, simulate: bool) -> BynarResult { - let client_id = s.get_identity().unwrap(); + let client_id = s.get_identity()?; helpers::add_disk_request(s, path, id, client_id, simulate)?; //loop until socket is readable, then get the response loop { - let events = match s.get_events() { - Err(zmq::Error::EBUSY) => { - debug!("Socket Busy, skip"); - continue; - } - Err(e) => { - error!("Get Client Socket Events errored...{:?}", e); - return Err(BynarError::from(e)); - } - Ok(e) => e, - }; + let events = poll_events!(s, continue); // got response - if (events & zmq::POLLIN != 0) { + if (events as i16 & zmq::POLLIN != 0) { let mut message = helpers::get_messages(s)?; let op_result = get_message!(OpOutcomeResult, &message)?; match op_result.get_result() { @@ -44,17 +35,16 @@ fn add_disk(s: &Socket, path: &Path, id: Option, simulate: bool) -> BynarRe ResultType::ERR => { if op_result.has_error_msg() { let msg = op_result.get_error_msg(); - error!("Add disk failed: {}", msg); + //error!("Add disk failed: {}", msg); return Err(BynarError::from(op_result.get_error_msg())); } else { - error!("Add disk failed but error_msg not set"); + //error!("Add disk failed but error_msg not set"); return Err(BynarError::from("Add disk failed but error_msg not set")); } } } } } - return Err(BynarError::from(format!("Failed Add_disk loop"))); } fn list_disks(s: &Socket) -> BynarResult> { diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 4e6ab16..9f4ed0e 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -291,7 +291,7 @@ pub struct DBConfig { } /// get message(s) from the socket -pub fn get_messages(s: Socket) -> BynarResult> { +pub fn get_messages(s: &Socket) -> BynarResult> { let msg = s.recv_bytes(0)?; let id = msg.clone(); if s.get_rcvmore()? 
{ diff --git a/src/main.rs b/src/main.rs index fe2c115..d4facda 100644 --- a/src/main.rs +++ b/src/main.rs @@ -33,12 +33,15 @@ use daemonize::Daemonize; use helpers::{error::*, host_information::Host, ConfigSettings}; use libc::c_int; use log::{debug, error, info, trace, warn}; +use protobuf::parse_from_bytes; +use protobuf::Message as ProtobufMsg; use r2d2::Pool; use r2d2_postgres::PostgresConnectionManager as ConnectionManager; use signal_hook::iterator::Signals; use signal_hook::*; use simplelog::{CombinedLogger, Config, SharedLogger, TermLogger, WriteLogger}; use slack_hook::{PayloadBuilder, Slack}; +use zmq::Socket; use std::collections::HashMap; use std::collections::VecDeque; @@ -47,6 +50,7 @@ use std::path::{Path, PathBuf}; use std::process; use std::process::Command; use std::time::{Duration, Instant}; +use std::io::{Error, ErrorKind, Write}; // a specific operation and its outcome #[derive(Debug, Clone)] @@ -55,7 +59,7 @@ struct DiskOp { // the description for a JIRA ticket if necessary (None if not Safe-to-remove/Remove-disk) // Or, if an add_disk request, description is the ticket_id pub description: Option, - pub operaton_id: Option, // the operation id if one exists (for safe-to-remove, remove request handling) + pub operation_id: Option, // the operation id if one exists (for safe-to-remove, remove request handling) pub ret_val: Option, //None if outcome not yet determined } @@ -668,7 +672,7 @@ fn send_and_update( path: &PathBuf, ) -> BynarResult<()> { trace!("Send request {:?}", mess); - request(s, mess, client_id)?; + helpers::request(s, mess, client_id)?; //add or update to message_map if path != emptyyyy if mess.get_disk() != "" { trace!("add operation to map"); @@ -688,9 +692,9 @@ fn handle_add_disk_res(pool: &Pool, outcome: OpOutcome, ticke // Disk is already in the cluster OpOutcome::SkipRepeat => debug!("Disk already added. Skipping. Updating database record"), } - match in_progress::resolve_ticket_in_db(pool, &ticket.ticket_id) { + match in_progress::resolve_ticket_in_db(pool, &ticket_id) { Ok(_) => debug!("Database updated"), - Err(e) => error!("Failed to resolve ticket {}. {:?}", ticket.ticket_id, e), + Err(e) => error!("Failed to resolve ticket {}. {:?}", ticket_id, e), }; } @@ -700,9 +704,9 @@ fn handle_operation_result( pool: &Pool, op_res: OpOutcomeResult, ) -> BynarResult<()> { - match op_result.get_result() { + match op_res.get_result() { ResultType::OK => {} - ResultType::Err => { + ResultType::ERR => { if op_res.has_error_msg() { let msg = op_res.get_error_msg(); match op_res.get_op_type() { @@ -727,9 +731,9 @@ fn handle_operation_result( match op_res.get_op_type() { Op::Add => { let path = Path::new(&op_res.get_disk()); - if let Some(disk_op) = get_map_op(message_map, path.to_path_buf())? { + if let Some(disk_op) = get_map_op(message_map, &path.to_path_buf())? 
{ if let Some(ticket_id) = disk_op.description { - handle_add_disk_res(pool, op_res.get_outcome(), ticket_id: String) + handle_add_disk_res(pool, op_res.get_outcome(), ticket_id); } } error!( @@ -752,30 +756,20 @@ fn send_and_recieve( client_id: Vec, ) -> BynarResult<()> { // Note, all client sent messages are Operation, while return values can be OpJiraTicketResult, Disks, or OpOutcomeResult - let events = match s.get_events() { - Err(zmq::Error::EBUSY) => { - debug!("Socket Busy, skip"); - return Ok(()); - } - Err(e) => { - error!("Get Client Socket Events errored...{:?}", e); - return Err(BynarError::from(e)); - } - Ok(e) => e, - }; + let events = poll_events!(s, return Ok(())); //check sendable first - if (events & zmq::POLLOUT) != 0 { + if (events as i16 & zmq::POLLOUT) != 0 { //dequeue from message_queue if it isn't empty if let Some((mess, desc, op_id)) = message_queue.pop_front() { // if mess.op_type() == Op::Remove, check if Safe-To-Remove in map complete // if not, send to end of queue (push_back) let path = Path::new(mess.get_disk()).to_path_buf(); //check if there was a previous request, and whether it was completed - if let Some(disk_op) = get_map_op(&message_map, &path.to_path_buf()) { + if let Some(disk_op) = get_map_op(&message_map, &path.to_path_buf())? { // check if Safe-to-remove returned yet if let Some(val) = disk_op.ret_val { // check if mess is a Remove op - if mess.op_type() == Op::Remove { + if mess.get_Op_type() == Op::Remove { // check success outcome if val.get_outcome() == OpOutcome::Success && val.get_value() { //then ok to run Op::Remove @@ -817,10 +811,10 @@ fn send_and_recieve( } } // can get response - if (events & zmq::POLLIN != 0) { + if (events as i16 & zmq::POLLIN != 0) { // get the message, it should be either a OpOutcomeResult, or OpJiraTicketsResult // NOTE: disks is not an option since list_disks is not a request that the main bynar program makes - let mut message = get_messages(s)?; + let mut message = helpers::get_messages(s)?; // skip empty initial message, and keep looping until no more messages from disk-manager while message.len() > 0 { // get message @@ -831,7 +825,7 @@ fn send_and_recieve( Err(_) => { // must be tickets, since list_disks is never requested by bynar main program let tickets = get_message!(OpJiraTicketsResult, &message)?; - message.drain(0..ticket.write_to_bytes()?.len()); + message.drain(0..tickets.write_to_bytes()?.len()); } } } diff --git a/src/util.rs b/src/util.rs index cbfc59a..f7f9898 100644 --- a/src/util.rs +++ b/src/util.rs @@ -1,5 +1,3 @@ - - #[macro_export] macro_rules! evaluate { ($e: expr, $i: ident, $err: expr, $e_ident: ident) => { @@ -22,3 +20,21 @@ macro_rules! get_message { protobuf::parse_from_bytes::<$type_name>($mess) }; } + + +#[macro_export] +macro_rules! 
poll_events { + ($s:expr, $ret:expr) => { + match $s.get_events() { + Err(zmq::Error::EBUSY) => { + debug!("Socket Busy, skip"); + $ret; + } + Err(e) => { + error!("Get Client Socket Events errored...{:?}", e); + return Err(BynarError::from(e)); + } + Ok(e) => e, + } + } +} From 02a9ac458ce9188365c8b0728b2028a262aa8a8b Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 31 Jan 2020 11:02:22 -0500 Subject: [PATCH 15/76] Updated Zmq version --- Cargo.lock | 59 +++++++++--------- Cargo.toml | 2 +- src/backend/ceph.rs | 15 +++-- src/client.rs | 141 +++++++++++++++++++++++++++++++------------- src/disk_manager.rs | 13 ++-- src/lib/lib.rs | 2 +- src/main.rs | 19 +++--- src/util.rs | 19 +++++- 8 files changed, 175 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bc0d26..f826b5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -74,7 +74,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "backtrace" -version = "0.3.42" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", @@ -273,7 +273,7 @@ dependencies = [ "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "uname 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", - "zmq 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", + "zmq 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -638,7 +638,7 @@ dependencies = [ [[package]] name = "dtoa" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -695,7 +695,7 @@ name = "error-chain" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.42 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -703,7 +703,7 @@ name = "error-chain" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.42 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)", "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -712,7 +712,7 @@ name = "failure" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.42 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)", "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1374,9 +1374,9 @@ dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.27 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)", "schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", 
"security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1475,7 +1475,7 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.26" +version = "0.10.27" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1483,7 +1483,7 @@ dependencies = [ "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1493,10 +1493,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "openssl-sys" -version = "0.9.53" +version = "0.9.54" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1550,7 +1550,7 @@ dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2053,14 +2053,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "utf8-ranges 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex-syntax" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2263,7 +2263,7 @@ name = "serde_urlencoded" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2358,7 +2358,7 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2851,7 +2851,7 @@ name = "unicode-normalization" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - 
"smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3079,17 +3079,18 @@ dependencies = [ [[package]] name = "zmq" -version = "0.8.3" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "zmq-sys 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", + "zmq-sys 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "zmq-sys" -version = "0.8.3" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3107,7 +3108,7 @@ dependencies = [ "checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" "checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" "checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum backtrace 0.3.42 (registry+https://github.com/rust-lang/crates.io-index)" = "b4b1549d804b6c73f4817df2ba073709e96e426f12987127c48e6745568c350b" +"checksum backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)" = "7f80256bc78f67e7df7e36d77366f636ed976895d91fe2ab9efa3973e8fe8c4f" "checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" "checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" @@ -3164,7 +3165,7 @@ dependencies = [ "checksum dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" "checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" "checksum dmi 0.1.0 (git+https://github.com/cholcombe973/dmi)" = "" -"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e" +"checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" "checksum encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" "checksum env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)" = "15b0a4d2e39f8420210be8b27eeda28029729e2fd4291019455016c348240c38" @@ -3259,9 +3260,9 @@ dependencies = [ "checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" "checksum 
num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" "checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -"checksum openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3cc5799d98e1088141b8e01ff760112bbd9f19d850c124500566ca6901a585" +"checksum openssl 0.10.27 (registry+https://github.com/rust-lang/crates.io-index)" = "e176a45fedd4c990e26580847a525e39e16ec32ac78957dbf62ded31b3abfd6f" "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -"checksum openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)" = "465d16ae7fc0e313318f7de5cecf57b2fbe7511fd213978b457e1c96ff46736f" +"checksum openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)" = "1024c0a59774200a555087a6da3f253a9095a5f344e353b212ac4c8b8e450986" "checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" "checksum parking_lot 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" "checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" @@ -3323,7 +3324,7 @@ dependencies = [ "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" "checksum regex 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ee84f70c8c08744ea9641a731c7fadb475bf2ecc52d7f627feb833e0b3990467" -"checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" +"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" "checksum relay 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1576e382688d7e9deecea24417e350d3062d97e32e45d70b1cde65994ff1489a" "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" "checksum reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)" = "f88643aea3c1343c804950d7bf983bd2067f5ab59db6d613a08e05572f2714ab" @@ -3359,7 +3360,7 @@ dependencies = [ "checksum slack-hook 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50e26b33762cd2ec755267c4a4af36adb0864b93afbe595ea8ff61b5528f4c11" "checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013" "checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -"checksum smallvec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44e59e0c9fa00817912ae6e4e6e3c4fe04455e75699d06eedc7d85917ed8e8f4" +"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" "checksum socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "e8b74de517221a2cb01a53349cf54182acdc31a074727d3079068448c0676d85" "checksum static_assertions 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7f3eb36b47e512f8f1c9e3d10c2c1965bc992bd9cdb024fa581e2194501c83d3" "checksum string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" @@ -3441,5 +3442,5 @@ dependencies = [ "checksum winutil 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7daf138b6b14196e3830a588acf1e86966c694d3e8fb026fb105b8b5dca07e6e" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xml-rs 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7ec6c39eaa68382c8e31e35239402c0a9489d4141a8ceb0c716099a0b515b562" -"checksum zmq 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff1c5e9ff4ac9c2847b2e72ada1c1eb3e188adb49fe3f1dd3bbcdc47f414d17" -"checksum zmq-sys 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b770cf495ad41e920ab25c465f35b7eec09d0ce8d7c892a7c8334a5e0de037c" +"checksum zmq 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aad98a7a617d608cd9e1127147f630d24af07c7cd95ba1533246d96cbdd76c66" +"checksum zmq-sys 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d33a2c51dde24d5b451a2ed4b488266df221a5eaee2ee519933dc46b9a9b3648" diff --git a/Cargo.toml b/Cargo.toml index 6c67aed..9479762 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,7 +84,7 @@ tempdir = "~0.3" time = "~0.1" uname = "~0.1" uuid = { version="~0.7", features = ["serde", "v4"]} -zmq = {version="~0.8"} +zmq = {version="~0.9"} diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index f16d062..366967b 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -484,7 +484,7 @@ impl CephBackend { ceph_bluestore_tool(&lv_dev_name, &mount_point, simulate)?; let host_info = Host::new()?; - let gb_capacity = vg_size / 1_073_741_824; + //let gb_capacity = vg_size / 1_073_741_824; let osd_weight = 0.0; debug!( "Adding OSD {} to crushmap under host {} with weight: {}", @@ -578,7 +578,7 @@ impl CephBackend { debug!("Saving ceph keyring"); save_keyring(new_osd_id, &auth_key, None, None, simulate)?; let host_info = Host::new()?; - let gb_capacity = info.capacity / 1_073_741_824; + //let gb_capacity = info.capacity / 1_073_741_824; let osd_weight = 0.0; //gb_capacity as f64 * 0.001_f64; debug!( "Adding OSD {} to crushmap under host {} with weight: {}", @@ -781,7 +781,10 @@ impl CephBackend { match read_link(Path::new(&journal_path)) { Ok(path) => return Ok(Some(path)), Err(e) => { - error!("Bad journal symlink. journal no longer points to valid UUID"); + error!( + "Bad journal symlink. journal no longer points to valid UUID {}", + e + ); return Ok(None); } } @@ -800,7 +803,7 @@ impl CephBackend { if let Some(parent_path) = block_utils::get_parent_devpath_from_path(&journal_path)? 
{ //check if parent device is in journal devices trace!("Parent path is {}", parent_path.display()); - let mut journal_devices = self + let journal_devices = self .config .journal_devices .clone() @@ -1356,7 +1359,7 @@ impl CephBackend { debug!("Gradually weighting osd: {}", osd_id); //set noscrub (remember to handle error by unsetting noscrub) self.set_noscrub(simulate)?; - while (self.incremental_weight_osd(osd_id, is_add, simulate)?) { + while self.incremental_weight_osd(osd_id, is_add, simulate)? { trace!("incrementally reweighting osd"); } Ok(()) @@ -1514,7 +1517,7 @@ fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path) -> BynarResult< } //might be a Bluestore lvm, check the ceph-volume let ceph_volumes = ceph_volume_list(&cluster_handle)?; - for (id, meta) in ceph_volumes { + for (_id, meta) in ceph_volumes { for data in meta { match data.metadata { LvmData::Osd(data) => { diff --git a/src/client.rs b/src/client.rs index 96d6431..84cab0f 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,12 +1,14 @@ /// This is built into a separate binary called bynar-client //mod disk_manager; use std::fs::{read_to_string, File}; +use std::io::{Error, ErrorKind, Read, Write}; use std::path::Path; use std::str::FromStr; -use std::io::{Error, ErrorKind, Write}; //use disk_manager::disk_manager; -use api::service::{Disk, OpOutcome, OpOutcomeResult, ResultType}; +use api::service::{ + Disk, Disks, JiraInfo, OpJiraTicketsResult, OpOutcome, OpOutcomeResult, ResultType, +}; use clap::{crate_authors, crate_version, App, Arg, ArgMatches, SubCommand}; use helpers::error::{BynarError, BynarResult}; use hostname::get_hostname; @@ -20,46 +22,67 @@ mod util; CLI client to call functions over RPC */ -fn add_disk(s: &Socket, path: &Path, id: Option, simulate: bool) -> BynarResult { - let client_id = s.get_identity()?; +fn add_disk( + s: &Socket, + path: &Path, + id: Option, + client_id: Vec, + simulate: bool, +) -> BynarResult { helpers::add_disk_request(s, path, id, client_id, simulate)?; //loop until socket is readable, then get the response loop { let events = poll_events!(s, continue); // got response - if (events as i16 & zmq::POLLIN != 0) { - let mut message = helpers::get_messages(s)?; + if events.contains(zmq::PollEvents::POLLIN) { + let message = helpers::get_messages(s)?; let op_result = get_message!(OpOutcomeResult, &message)?; - match op_result.get_result() { - ResultType::OK => return Ok(op_result.get_outcome()), - ResultType::ERR => { - if op_result.has_error_msg() { - let msg = op_result.get_error_msg(); - //error!("Add disk failed: {}", msg); - return Err(BynarError::from(op_result.get_error_msg())); - } else { - //error!("Add disk failed but error_msg not set"); - return Err(BynarError::from("Add disk failed but error_msg not set")); - } - } - } + get_op_result!(op_result, add_disk); } } } -fn list_disks(s: &Socket) -> BynarResult> { - let disks = helpers::list_disks_request(s)?; - println!("disk list: {:?}", disks); - - Ok(disks) +fn list_disks(s: &Socket, client_id: Vec) -> BynarResult> { + helpers::list_disks_request(s, client_id)?; + //loop until socket is readable, then get the response + loop { + let events = poll_events!(s, continue); + // got response + if events.contains(zmq::PollEvents::POLLIN) { + let message = helpers::get_messages(s)?; + let disks = get_message!(Disks, &message)?; + let mut d: Vec = Vec::new(); + for disk in disks.get_disk() { + d.push(disk.clone()); + } + println!("disk list: {:?}", d); + return Ok(d); + } + } } -fn remove_disk(s: &Socket, path: &Path, 
id: Option, simulate: bool) -> BynarResult { - let outcome = helpers::remove_disk_request(s, path, id, simulate)?; - Ok(outcome) +fn remove_disk( + s: &Socket, + path: &Path, + id: Option, + client_id: Vec, + simulate: bool, +) -> BynarResult { + helpers::remove_disk_request(s, path, id, client_id, simulate)?; + + //loop until socket is readable, then get the response + loop { + let events = poll_events!(s, continue); + // got response + if events.contains(zmq::PollEvents::POLLIN) { + let message = helpers::get_messages(s)?; + let op_result = get_message!(OpOutcomeResult, &message)?; + get_op_result!(op_result, remove_disk); + } + } } -fn handle_add_disk(s: &Socket, matches: &ArgMatches<'_>) { +fn handle_add_disk(s: &Socket, matches: &ArgMatches<'_>, client_id: Vec) { let p = Path::new(matches.value_of("path").unwrap()); info!("Adding disk: {}", p.display()); let id = match matches.value_of("id") { @@ -70,7 +93,7 @@ fn handle_add_disk(s: &Socket, matches: &ArgMatches<'_>) { Some(s) => bool::from_str(&s).unwrap(), None => false, }; - match add_disk(s, &p, id, simulate) { + match add_disk(s, &p, id, client_id, simulate) { Ok(outcome) => match outcome { OpOutcome::Success => println!("Adding disk successful"), OpOutcome::Skipped => println!("Disk cannot be added, Skipping"), @@ -82,9 +105,9 @@ fn handle_add_disk(s: &Socket, matches: &ArgMatches<'_>) { }; } -fn handle_list_disks(s: &Socket) { +fn handle_list_disks(s: &Socket, client_id: Vec) { info!("Listing disks"); - match list_disks(s) { + match list_disks(s, client_id) { Ok(disks) => { println!("Disk list: {:?}", disks); } @@ -94,14 +117,45 @@ fn handle_list_disks(s: &Socket) { }; } -fn handle_jira_tickets(s: &Socket) -> BynarResult<()> { +fn handle_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { trace!("handle_jira_tickets called"); - helpers::get_jira_tickets(s)?; - trace!("handle_jira_tickets Finished"); - Ok(()) + helpers::get_jira_tickets(s, client_id)?; + //loop until socket is readable, then get the response + loop { + let events = poll_events!(s, continue); + // got response + if events.contains(zmq::PollEvents::POLLIN) { + let message = helpers::get_messages(s)?; + let tickets = get_message!(OpJiraTicketsResult, &message)?; + match tickets.get_result() { + ResultType::OK => { + debug!("got tickets successfully"); + let proto_jira = tickets.get_tickets(); + let mut _jira: Vec = Vec::new(); + for JiraInfo in proto_jira { + debug!("get_ticket_id: {}", JiraInfo.get_ticket_id()); + debug!("get_server_name: {}", JiraInfo.get_server_name()); + } + return Ok(()); + } + ResultType::ERR => { + if tickets.has_error_msg() { + let msg = tickets.get_error_msg(); + error!("get jira tickets failed : {}", msg); + return Err(BynarError::from(tickets.get_error_msg())); + } else { + error!("Get jira tickets failed but error_msg not set"); + return Err(BynarError::from( + "Get jira tickets failed but error_msg not set", + )); + } + } + } + } + } } -fn handle_remove_disk(s: &Socket, matches: &ArgMatches<'_>) { +fn handle_remove_disk(s: &Socket, matches: &ArgMatches<'_>, client_id: Vec) { let p = Path::new(matches.value_of("path").unwrap()); info!("Removing disk: {}", p.display()); let id = match matches.value_of("id") { @@ -112,7 +166,7 @@ fn handle_remove_disk(s: &Socket, matches: &ArgMatches<'_>) { Some(s) => bool::from_str(&s).unwrap(), None => false, }; - match remove_disk(s, &p, id, simulate) { + match remove_disk(s, &p, id, client_id, simulate) { Ok(outcome) => match outcome { OpOutcome::Success => println!("Removing disk 
successful"), OpOutcome::Skipped => println!("Disk cannot be removed. Skipping"), @@ -246,7 +300,9 @@ fn main() { ), ]); info!("Starting up"); - let server_pubkey = read_to_string(matches.value_of("server_key").unwrap()).unwrap(); + let mut server_pubkey = Vec::new(); + let mut keyfile = File::open(matches.value_of("server_key").unwrap()).unwrap(); + keyfile.read_to_end(&mut server_pubkey).unwrap(); let s = match helpers::connect(host, port, &server_pubkey) { Ok(s) => s, @@ -255,17 +311,18 @@ fn main() { return; } }; + let client_id: Vec = s.get_identity().unwrap(); if let Some(ref matches) = matches.subcommand_matches("add") { - handle_add_disk(&s, matches); + handle_add_disk(&s, matches, client_id.clone()); } if matches.subcommand_matches("list").is_some() { - handle_list_disks(&s); + handle_list_disks(&s, client_id.clone()); } if let Some(ref matches) = matches.subcommand_matches("remove") { - handle_remove_disk(&s, matches); + handle_remove_disk(&s, matches, client_id.clone()); } if let Some(ref _matches) = matches.subcommand_matches("get_jira_tickets") { - match handle_jira_tickets(&s) { + match handle_jira_tickets(&s, client_id.clone()) { Ok(()) => {} Err(e) => println!("Get JIRA tickets failed {}", e), }; diff --git a/src/disk_manager.rs b/src/disk_manager.rs index cf2b076..6bceee6 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -136,14 +136,14 @@ fn setup_curve(s: &Socket, config_dir: &Path, vault: bool) -> BynarResult<()> { let client = VaultClient::new(endpoint.as_str(), token)?; client.set_secret( format!("{}/{}.pem", config_dir.display(), hostname), - String::from_utf8_lossy(keypair.public_key.as_bytes()), + String::from_utf8_lossy(&keypair.public_key), )?; s.set_curve_secretkey(&keypair.secret_key)?; } else { debug!("Creating new curve keypair"); s.set_curve_secretkey(&keypair.secret_key)?; let mut f = File::create(key_file)?; - f.write_all(keypair.public_key.as_bytes())?; + f.write_all(&keypair.public_key)?; } debug!("Server mechanism: {:?}", s.get_mechanism()); debug!("Curve server: {:?}", s.is_curve_server()); @@ -220,7 +220,7 @@ fn listen( Ok(e) => e as zmq::PollEvents, }; // is the socket readable? 
- if (events & zmq::POLLIN) != 0 { + if events.contains(zmq::PollEvents::POLLIN) { //get the id first {STREAM sockets get messages with id prepended} let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly trace!("Client ID {:?}", client_id); @@ -384,7 +384,7 @@ fn listen( } } // send completed requests (or error messages) - if (events & zmq::POLLOUT) != 0 { + if events.contains(zmq::PollEvents::POLLOUT) { //check disks first, since those are faster requests than add/remove reqs match recv_disk.try_recv() { Ok((client_id, result)) => { @@ -433,8 +433,7 @@ fn listen( ); return Ok(()); } - let config: ConfigSettings = - config_file.expect("Failed to load config"); + config = config_file.expect("Failed to load config"); } signal_hook::SIGINT | signal_hook::SIGCHLD => { //skip this @@ -457,7 +456,7 @@ fn listen( } Err(_) => {} } - }); + })?; Ok(()) } diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 9f4ed0e..681bda7 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -31,7 +31,7 @@ where Ok(deserialized) } -pub fn connect(host: &str, port: &str, server_publickey: &str) -> BynarResult { +pub fn connect(host: &str, port: &str, server_publickey: &[u8]) -> BynarResult { debug!("Starting zmq sender with version({:?})", zmq::version()); let context = zmq::Context::new(); let requester = context.socket(zmq::DEALER)?; diff --git a/src/main.rs b/src/main.rs index d4facda..a0e9959 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,11 +46,11 @@ use zmq::Socket; use std::collections::HashMap; use std::collections::VecDeque; use std::fs::{create_dir, read_to_string, File, OpenOptions}; +use std::io::{Error, ErrorKind, Read, Write}; use std::path::{Path, PathBuf}; use std::process; use std::process::Command; use std::time::{Duration, Instant}; -use std::io::{Error, ErrorKind, Write}; // a specific operation and its outcome #[derive(Debug, Clone)] @@ -276,7 +276,7 @@ fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { Ok(()) } -fn get_public_key(config: &ConfigSettings, host_info: &Host) -> BynarResult { +fn get_public_key(config: &ConfigSettings, host_info: &Host) -> BynarResult> { // If vault_endpoint and token are set we should get the key from vault // Otherwise we need to know where the public_key is located? if config.vault_endpoint.is_some() && config.vault_token.is_some() { @@ -293,7 +293,7 @@ fn get_public_key(config: &ConfigSettings, host_info: &Host) -> BynarResult BynarResult, Option)> = VecDeque::new(); 'outer: loop { let now = Instant::now(); match check_for_failed_disks( @@ -1053,7 +1058,7 @@ fn main() { }; match add_repaired_disks( &config, - &host_info, + &mut message_queue, &db_pool, host_details_mapping.storage_detail_id, simulate, diff --git a/src/util.rs b/src/util.rs index f7f9898..5d862d7 100644 --- a/src/util.rs +++ b/src/util.rs @@ -21,7 +21,6 @@ macro_rules! get_message { }; } - #[macro_export] macro_rules! poll_events { ($s:expr, $ret:expr) => { @@ -36,5 +35,21 @@ macro_rules! poll_events { } Ok(e) => e, } - } + }; +} + +#[macro_export] +macro_rules! 
get_op_result { + ($op_result:expr, $type_op:ident) => { + match $op_result.get_result() { + ResultType::OK => return Ok($op_result.get_outcome()), + ResultType::ERR => { + if $op_result.has_error_msg() { + return Err(BynarError::from($op_result.get_error_msg())); + } else { + return Err(BynarError::from("$type_op failed but error_msg not set")); + } + } + } + }; } From 770147fea3be0ba43b45ea84239ecf617c142f34 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 31 Jan 2020 11:42:26 -0500 Subject: [PATCH 16/76] fix notify? --- src/disk_manager.rs | 15 +++++++++------ src/main.rs | 33 +++++++++++++++++++++++++-------- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 6bceee6..00a4a89 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -424,16 +424,22 @@ fn listen( //Reload the config file debug!("Reload Config File"); let config_file = - helpers::load_config(config_dir, "bynar.json"); + helpers::load_config(config_dir, "disk-manager.json"); if let Err(e) = config_file { error!( "Failed to load config file {}. error: {}", - config_dir.join("bynar.json").display(), + config_dir.join("disk-manager.json").display(), e ); return Ok(()); } - config = config_file.expect("Failed to load config"); + let config: DiskManagerConfig = + config_file.expect("Failed to load config"); + let _ = notify_slack( + &config, + &format!("Reload disk-manager config file"), + ) + .expect("Unable to connect to slack"); } signal_hook::SIGINT | signal_hook::SIGCHLD => { //skip this @@ -447,9 +453,6 @@ fn listen( } _ => unreachable!(), } - let config: DiskManagerConfig = - config_file.expect("Failed to load config"); - let _ = notify_slack(&config, &format!("Reload disk-manager config file")).expect("Unable to connect to slack"); } } } diff --git a/src/main.rs b/src/main.rs index a0e9959..6005ff5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -333,6 +333,7 @@ fn add_disk_to_description( fn check_for_failed_disks( config: &ConfigSettings, + message_queue: &mut VecDeque<(Operation, Option, Option)>, host_info: &Host, pool: &Pool, host_mapping: &HostDetailsMapping, @@ -377,7 +378,18 @@ fn check_for_failed_disks( debug!("Device is already in the repair queue"); } (false, false) => { - debug!("Asking disk-manager if it's safe to remove disk"); + debug!("Sending Safe-to-Remove and Remove requests"); + let op_id = match state_machine.block_device.operation_id { + None => { + error!( + "Operation not recorded for {}", + state_machine.block_device.dev_path.display() + ); + 0 + } + Some(i) => i, + }; + /*debug!("Asking disk-manager if it's safe to remove disk"); // CALL RPC let socket = helpers::connect( &config.manager_host, @@ -461,7 +473,7 @@ fn check_for_failed_disks( let mut operation_detail = OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; + add_or_update_operation_detail(pool, &mut operation_detail)?;*/ } (..) 
=> {} } @@ -674,9 +686,9 @@ fn send_and_update( path: &PathBuf, ) -> BynarResult<()> { trace!("Send request {:?}", mess); - helpers::request(s, mess, client_id)?; + helpers::request(s, mess.clone(), client_id)?; //add or update to message_map if path != emptyyyy - if mess.get_disk() != "" { + if mess.clone().get_disk() != "" { trace!("add operation to map"); //check optype, make op let disk_op = DiskOp::new(mess, desc, op_id); @@ -732,7 +744,7 @@ fn handle_operation_result( match op_res.get_op_type() { Op::Add => { - let path = Path::new(&op_res.get_disk()); + let path = Path::new(op_res.get_disk()); if let Some(disk_op) = get_map_op(message_map, &path.to_path_buf())? { if let Some(ticket_id) = disk_op.description { handle_add_disk_res(pool, op_res.get_outcome(), ticket_id); @@ -749,6 +761,10 @@ fn handle_operation_result( path.display() ))); } + _ => { + //need to prep other stuff + Ok(()) + } } } @@ -779,7 +795,7 @@ fn send_and_recieve( //then ok to run Op::Remove send_and_update( s, - &mut message_map, + message_map, client_id, (mess, desc, op_id), &path, @@ -796,7 +812,7 @@ fn send_and_recieve( ); send_and_update( s, - &mut message_map, + message_map, client_id, (mess, desc, op_id), &path, @@ -810,7 +826,7 @@ fn send_and_recieve( // safe to run the op. In the case of Remove op, it shouldn't be possible to NOT // have a safe-to-remove run before (it's always safe-to-remove then remove) // however since the remove operation will run safe-to-remove anyways, it's fine to just run - send_and_update(s, &mut message_map, client_id, (mess, desc, op_id), &path)?; + send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; } } } @@ -1028,6 +1044,7 @@ fn main() { let now = Instant::now(); match check_for_failed_disks( &config, + &mut message_queue, &host_info, &db_pool, &host_details_mapping, From bf586c5ec8e5f2a281327357b4766893bd902e59 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 3 Feb 2020 14:17:20 -0500 Subject: [PATCH 17/76] Filter states in returned state machines and clippy fixes --- src/backend/ceph.rs | 82 ++++---- src/client.rs | 2 +- src/disk_manager.rs | 469 ++++++++++++++++++++++---------------------- src/lib/lib.rs | 3 +- src/main.rs | 101 ++++++---- src/test_disk.rs | 2 +- 6 files changed, 333 insertions(+), 326 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 366967b..9d0ef80 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -212,7 +212,7 @@ fn validate_config(config: &mut CephConfig, cluster_handle: &Rados) -> BynarResu } if config.increment < 0.0 { warn!("input increment < 0, flipping to positive value"); - config.increment = config.increment * -1.0; + config.increment *= -1.0; } if config.increment == 0.0 { return Err(BynarError::from("increment is 0.0")); @@ -220,7 +220,7 @@ fn validate_config(config: &mut CephConfig, cluster_handle: &Rados) -> BynarResu if config.backfill_cap == 0 { return Err(BynarError::from("backfill cap is 0")); } - if config.latency_cap == 0.0 || config.latency_cap < 0.0 { + if config.latency_cap <= 0.0 { return Err(BynarError::from("latency cap is less than or equal to 0.0")); } let names = osd_pool_ls(cluster_handle)?; @@ -1226,7 +1226,7 @@ impl CephBackend { ]) .output()?; let output = String::from_utf8_lossy(&output_child.stdout).to_lowercase(); - let lines: Vec<&str> = output.split("\n").collect(); + let lines: Vec<&str> = output.split('\n').collect(); for line in lines { if line.contains("average latency") { let attr: Vec<&str> = line.split_whitespace().collect(); @@ -1281,10 +1281,10 @@ 
impl CephBackend { ))); } } - return Err(BynarError::from(format!( + Err(BynarError::from(format!( "Could not find Osd {} in crush map", osd_id - ))); + ))) } // incrementally weight the osd. return true if reweight ongoing, false if finished @@ -1377,14 +1377,11 @@ impl Backend for CephBackend { return Ok(OpOutcome::Skipped); } // check if the osd id, if given, is already in the cluster - match id { - Some(osd_id) => { - if is_osd_id_in_cluster(&self.cluster_handle, osd_id) { - error!("Osd ID {} is already in the cluster. Skipping", osd_id); - return Ok(OpOutcome::Skipped); - } + if let Some(osd_id) = id { + if osd_metadata_by_id(&self.cluster_handle, osd_id).is_ok() { + error!("Osd ID {} is already in the cluster. Skipping", osd_id); + return Ok(OpOutcome::Skipped); } - None => {} } // check if the disk is already in the cluster if is_device_in_cluster(&self.cluster_handle, device)? { @@ -1466,19 +1463,18 @@ impl Backend for CephBackend { } //check if manual bluestore let osd_config = get_osd_config_by_path(&self.config, device)?; - let osd_id; - if !osd_config.is_lvm { + let osd_id = if !osd_config.is_lvm { let mut part2: String = device.to_string_lossy().to_string(); part2.truncate(part2.len() - 1); part2.push_str("2"); let part2 = Path::new(&part2); debug!("CHECKING PATH {}", part2.display()); //get the osd id - osd_id = get_osd_id_from_device(&self.cluster_handle, part2)?; + get_osd_id_from_device(&self.cluster_handle, part2)? } else { //get the osd id - osd_id = get_osd_id_from_device(&self.cluster_handle, device)?; - } + get_osd_id_from_device(&self.cluster_handle, device)? + }; // create and send the command to check if the osd is safe to remove Ok(( OpOutcome::Success, @@ -1519,31 +1515,20 @@ fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path) -> BynarResult< let ceph_volumes = ceph_volume_list(&cluster_handle)?; for (_id, meta) in ceph_volumes { for data in meta { - match data.metadata { - LvmData::Osd(data) => { - //check if devices contains the device path - for device in data.devices { - if device == path { - return Ok(true); - } + //skip other lvm types + if let LvmData::Osd(data) = data.metadata { + //check if devices contains the device path + for device in data.devices { + if device == path { + return Ok(true); } } - //skip other lvm types - _ => {} } } } Ok(false) } -// Check if an osd_id is already in the cluster -fn is_osd_id_in_cluster(cluster_handle: &Rados, osd_id: u64) -> bool { - match osd_metadata_by_id(cluster_handle, osd_id) { - Ok(_) => true, - Err(_) => false, - } -} - /// get the osd id from the device path using the osd metadata (Needs modification for Bluestore) /// Note: need to use ceph-volume lvm list to (potentially) get the osd ID for a Bluestore osd, /// if looping over osd metadata doesn't work (on the plus side, ceph-volume lvm list only works @@ -1579,23 +1564,20 @@ fn get_osd_id_from_device(cluster_handle: &Rados, dev_path: &Path) -> BynarResul let ceph_volumes = ceph_volume_list(&cluster_handle)?; for (id, meta) in ceph_volumes { for data in meta { - match data.metadata { - LvmData::Osd(data) => { - //check if devices contains the device path - for device in data.devices { - if device == path { - return Ok(id.parse::()?); - } + //skip other lvm types + if let LvmData::Osd(data) = data.metadata { + //check if devices contains the device path + for device in data.devices { + if device == path { + return Ok(id.parse::()?); } } - //skip other lvm types - _ => {} } } } - Err(BynarError::new(format!( - "unable to find the osd in the osd 
metadata" - ))) + Err(BynarError::new( + "unable to find the osd in the osd metadata".to_string(), + )) } fn save_keyring( @@ -2187,7 +2169,9 @@ fn create_bluestore_man_partitions(path: &Path) -> BynarResult<()> { Some(p1) => p1.last_lba, None => { error!("First partition does not exist!"); - return Err(BynarError::from(format!("First partition does not exist!"))); + return Err(BynarError::from( + "First partition does not exist!".to_string(), + )); } }; let size = last - first_end; @@ -2206,7 +2190,9 @@ fn create_bluestore_man_partitions(path: &Path) -> BynarResult<()> { Some(p1) => p1.last_lba, None => { error!("First partition does not exist!"); - return Err(BynarError::from(format!("First partition does not exist!"))); + return Err(BynarError::from( + "First partition does not exist!".to_string(), + )); } }; let size = last - first_end; diff --git a/src/client.rs b/src/client.rs index 84cab0f..da2aa03 100644 --- a/src/client.rs +++ b/src/client.rs @@ -322,7 +322,7 @@ fn main() { handle_remove_disk(&s, matches, client_id.clone()); } if let Some(ref _matches) = matches.subcommand_matches("get_jira_tickets") { - match handle_jira_tickets(&s, client_id.clone()) { + match handle_jira_tickets(&s, client_id) { Ok(()) => {} Err(e) => println!("Get JIRA tickets failed {}", e), }; diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 00a4a89..6b973a2 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -208,256 +208,250 @@ fn listen( let (send_ticket, recv_ticket) = crossbeam_channel::unbounded::<(Vec, OpJiraTicketsResult)>(); pool.scope(|s| 'outer: loop { - match responder.try_lock() { - Ok(responder) => { - let now = Instant::now(); - let events = match responder.get_events() { - Err(zmq::Error::EBUSY) => { - trace!("Socket Busy, skip"); - continue; - } - Err(e) => return Err(BynarError::from(e)), - Ok(e) => e as zmq::PollEvents, - }; - // is the socket readable? - if events.contains(zmq::PollEvents::POLLIN) { - //get the id first {STREAM sockets get messages with id prepended} - let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly - trace!("Client ID {:?}", client_id); - // get actual message - while responder.get_rcvmore()? { - let mut msg = responder.recv_bytes(0)?; - debug!("Got msg len: {}", msg.len()); - trace!("Parsing msg {:?} as hex", msg); - while msg.len() > 0 { - let operation = match parse_from_bytes::(&msg) { - Ok(bytes) => bytes, - Err(e) => { - error!("Failed to parse_from_bytes {:?}. Ignoring request", e); - continue; - } - }; - let client_id = client_id.clone(); - msg.drain(0..operation.write_to_bytes()?.len()); - let send_res = send_res.clone(); - let send_disk = send_disk.clone(); - let send_ticket = send_ticket.clone(); - - debug!("Operation requested: {:?}", operation.get_Op_type()); - if op_no_disk(&responder, &operation) { + if let Ok(responder) = responder.try_lock() { + let now = Instant::now(); + let events = match responder.get_events() { + Err(zmq::Error::EBUSY) => { + trace!("Socket Busy, skip"); + continue; + } + Err(e) => return Err(BynarError::from(e)), + Ok(e) => e as zmq::PollEvents, + }; + // is the socket readable? + if events.contains(zmq::PollEvents::POLLIN) { + //get the id first {STREAM sockets get messages with id prepended} + let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly + trace!("Client ID {:?}", client_id); + // get actual message + while responder.get_rcvmore()? 
{ + let mut msg = responder.recv_bytes(0)?; + debug!("Got msg len: {}", msg.len()); + trace!("Parsing msg {:?} as hex", msg); + while !msg.is_empty() { + let operation = match parse_from_bytes::(&msg) { + Ok(bytes) => bytes, + Err(e) => { + error!("Failed to parse_from_bytes {:?}. Ignoring request", e); continue; } - match operation.get_Op_type() { - Op::Add => { - let id = if operation.has_osd_id() { - Some(operation.get_osd_id()) - } else { - None + }; + let client_id = client_id.clone(); + msg.drain(0..operation.write_to_bytes()?.len()); + let send_res = send_res.clone(); + let send_disk = send_disk.clone(); + let send_ticket = send_ticket.clone(); + + debug!("Operation requested: {:?}", operation.get_Op_type()); + if op_no_disk(&responder, &operation) { + continue; + } + match operation.get_Op_type() { + Op::Add => { + let id = if operation.has_osd_id() { + Some(operation.get_osd_id()) + } else { + None + }; + s.spawn(move |_| { + let disk = operation.get_disk(); + match add_disk( + &send_res, + disk, + &backend_type, + id, + config_dir, + client_id, + ) { + Ok(_) => { + info!("Add disk finished"); + } + Err(e) => { + error!("Add disk error: {:?}", e); + } + } + }); + } + Op::AddPartition => { + // + } + Op::List => { + s.spawn(move |_| { + match list_disks(&send_disk, client_id) { + Ok(_) => { + info!("List disks finished"); + } + Err(e) => { + error!("List disks error: {:?}", e); + } }; - s.spawn(move |_| { - let disk = operation.get_disk(); - match add_disk( - &send_res, - disk, - &backend_type, - id.clone(), - config_dir, - client_id, - ) { - Ok(_) => { - info!("Add disk finished"); - } - Err(e) => { - error!("Add disk error: {:?}", e); - } + }); + } + Op::Remove => { + let mut result = OpOutcomeResult::new(); + result.set_disk(operation.get_disk().to_string()); + result.set_op_type(Op::Remove); + + s.spawn(move |_| { + match safe_to_remove( + &Path::new(operation.get_disk()), + &backend_type, + config_dir, + ) { + Ok((OpOutcome::Success, true)) => { + match remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + config_dir, + client_id, + ) { + Ok(_) => { + info!("Remove disk finished"); + } + Err(e) => { + error!("Remove disk error: {:?}", e); + } + }; } - }); - } - Op::AddPartition => { - // - } - Op::List => { - s.spawn(move |_| { - match list_disks(&send_disk, client_id) { - Ok(_) => { - info!("List disks finished"); - } - Err(e) => { - error!("List disks error: {:?}", e); - } - }; - }); - } - Op::Remove => { - let mut result = OpOutcomeResult::new(); - result.set_disk(operation.get_disk().to_string()); - result.set_op_type(Op::Remove); - - s.spawn(move |_| { - match safe_to_remove( - &Path::new(operation.get_disk()), - &backend_type, - config_dir, - ) { - Ok((OpOutcome::Success, true)) => { - match remove_disk( - &send_res, - operation.get_disk(), - &backend_type, - config_dir, - client_id, - ) { - Ok(_) => { - info!("Remove disk finished"); - } - Err(e) => { - error!("Remove disk error: {:?}", e); - } - }; - } - Ok((OpOutcome::Skipped, val)) => { - debug!("Disk skipped"); - result.set_outcome(OpOutcome::Skipped); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = send_res.send((client_id, result)); - } - Ok((OpOutcome::SkipRepeat, val)) => { - debug!("Disk skipped, safe to remove already ran"); - result.set_outcome(OpOutcome::SkipRepeat); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = send_res.send((client_id, result)); - } - Ok((_, false)) => { - debug!("Disk is not safe to remove"); - //Response to client - 
result.set_value(false); - result.set_outcome(OpOutcome::Success); - result.set_result(ResultType::ERR); - result.set_error_msg( - "Not safe to remove disk".to_string(), - ); - let _ = send_res.send((client_id, result)); - } - Err(e) => { - error!("safe to remove failed: {:?}", e); - // Response to client - result.set_value(false); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - let _ = send_res.send((client_id, result)); - } - }; - }); - } - Op::SafeToRemove => { - s.spawn(move |_| { - match safe_to_remove_disk( - &send_res, - operation.get_disk(), - &backend_type, - config_dir, - client_id, - ) { - Ok(_) => { - info!("Safe to remove disk finished"); - } - Err(e) => { - error!("Safe to remove error: {:?}", e); - } - }; - }); - } - Op::GetCreatedTickets => { - match get_jira_tickets(&send_ticket, config_dir, client_id) { + Ok((OpOutcome::Skipped, val)) => { + debug!("Disk skipped"); + result.set_outcome(OpOutcome::Skipped); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = send_res.send((client_id, result)); + } + Ok((OpOutcome::SkipRepeat, val)) => { + debug!("Disk skipped, safe to remove already ran"); + result.set_outcome(OpOutcome::SkipRepeat); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = send_res.send((client_id, result)); + } + Ok((_, false)) => { + debug!("Disk is not safe to remove"); + //Response to client + result.set_value(false); + result.set_outcome(OpOutcome::Success); + result.set_result(ResultType::ERR); + result.set_error_msg( + "Not safe to remove disk".to_string(), + ); + let _ = send_res.send((client_id, result)); + } + Err(e) => { + error!("safe to remove failed: {:?}", e); + // Response to client + result.set_value(false); + result.set_result(ResultType::ERR); + result.set_error_msg(e.to_string()); + let _ = send_res.send((client_id, result)); + } + }; + }); + } + Op::SafeToRemove => { + s.spawn(move |_| { + match safe_to_remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + config_dir, + client_id, + ) { Ok(_) => { - info!("Fetching jira tickets finished"); + info!("Safe to remove disk finished"); } Err(e) => { - error!("Fetching jira error: {:?}", e); + error!("Safe to remove error: {:?}", e); } }; - } - }; - } + }); + } + Op::GetCreatedTickets => { + match get_jira_tickets(&send_ticket, config_dir, client_id) { + Ok(_) => { + info!("Fetching jira tickets finished"); + } + Err(e) => { + error!("Fetching jira error: {:?}", e); + } + }; + } + }; } } - // send completed requests (or error messages) - if events.contains(zmq::PollEvents::POLLOUT) { - //check disks first, since those are faster requests than add/remove reqs - match recv_disk.try_recv() { - Ok((client_id, result)) => { - // send result back to client - //send client id back first - let _ = responder.send(&client_id, zmq::SNDMORE); - let _ = respond_to_client(&result, &responder); - } - Err(_) => { - // check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) - match recv_ticket.try_recv() { - Ok((client_id, result)) => { + } + // send completed requests (or error messages) + if events.contains(zmq::PollEvents::POLLOUT) { + //check disks first, since those are faster requests than add/remove reqs + match recv_disk.try_recv() { + Ok((client_id, result)) => { + // send result back to client + //send client id back first + let _ = responder.send(&client_id, zmq::SNDMORE); + let _ = respond_to_client(&result, &responder); + } + Err(_) => { + // check if there are tickets (also 
takes a while, but not as long as add/remove/safe-to-remove) + match recv_ticket.try_recv() { + Ok((client_id, result)) => { + // send result back to client + let _ = responder.send(&client_id, zmq::SNDMORE); + let _ = respond_to_client(&result, &responder); + } + Err(_) => { + // no disks in the queue, check if any add/remove/safe-to-remove req results + if let Ok((client_id, result)) = recv_res.try_recv() { // send result back to client let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } - Err(_) => { - // no disks in the queue, check if any add/remove/safe-to-remove req results - match recv_res.try_recv() { - Ok((client_id, result)) => { - // send result back to client - let _ = responder.send(&client_id, zmq::SNDMORE); - let _ = respond_to_client(&result, &responder); - } - Err(_) => {} //do nothing - } - } } } } } - if daemon { - while now.elapsed() < Duration::from_millis(10) { - for signal in signals.pending() { - match signal as c_int { - signal_hook::SIGHUP => { - //Reload the config file - debug!("Reload Config File"); - let config_file = - helpers::load_config(config_dir, "disk-manager.json"); - if let Err(e) = config_file { - error!( - "Failed to load config file {}. error: {}", - config_dir.join("disk-manager.json").display(), - e - ); - return Ok(()); - } - let config: DiskManagerConfig = - config_file.expect("Failed to load config"); - let _ = notify_slack( - &config, - &format!("Reload disk-manager config file"), - ) - .expect("Unable to connect to slack"); - } - signal_hook::SIGINT | signal_hook::SIGCHLD => { - //skip this - debug!("Ignore signal"); - continue; - } - signal_hook::SIGTERM => { - //"gracefully" exit - debug!("Exit Process"); - break 'outer Ok(()); + } + if daemon { + while now.elapsed() < Duration::from_millis(10) { + for signal in signals.pending() { + match signal as c_int { + signal_hook::SIGHUP => { + //Reload the config file + debug!("Reload Config File"); + let config_file = + helpers::load_config(config_dir, "disk-manager.json"); + if let Err(e) = config_file { + error!( + "Failed to load config file {}. error: {}", + config_dir.join("disk-manager.json").display(), + e + ); + return Ok(()); } - _ => unreachable!(), + let config: DiskManagerConfig = + config_file.expect("Failed to load config"); + notify_slack( + &config, + &"Reload disk-manager config file".to_string(), + ) + .expect("Unable to connect to slack"); + } + signal_hook::SIGINT | signal_hook::SIGCHLD => { + //skip this + debug!("Ignore signal"); + continue; + } + signal_hook::SIGTERM => { + //"gracefully" exit + debug!("Exit Process"); + break 'outer Ok(()); } + _ => unreachable!(), } } } } - Err(_) => {} } })?; Ok(()) @@ -825,16 +819,13 @@ fn main() { .args(&["-p", &pid]) .output() .expect("Unable to open shell to run ps command"); - match output.status.code() { - Some(0) => { - let out = String::from_utf8_lossy(&output.stdout); - if out.contains("disk-manager") { - //skip - error!("There is already a running instance of disk-manager! Abort!"); - return; - } + if let Some(0) = output.status.code() { + let out = String::from_utf8_lossy(&output.stdout); + if out.contains("disk-manager") { + //skip + error!("There is already a running instance of disk-manager! 
Abort!"); + return; } - _ => {} } } let signals = Signals::new(&[ @@ -849,8 +840,10 @@ fn main() { let outfile = format!("/var/log/{}", config.daemon_output); let errfile = format!("/var/log/{}", config.daemon_error); - let stdout = File::create(&outfile).expect(&format!("{} creation failed", outfile)); - let stderr = File::create(&errfile).expect(&format!("{} creation failed", errfile)); + let stdout = + File::create(&outfile).unwrap_or_else(|_| panic!("{} creation failed", outfile)); + let stderr = + File::create(&errfile).unwrap_or_else(|_| panic!("{} creation failed", errfile)); trace!("I'm Parent and My pid is {}", process::id()); @@ -909,7 +902,7 @@ fn main() { ) { Ok(_) => { println!("Finished"); - let _ = notify_slack( + notify_slack( &config, &format!( "Disk-Manager Exited Successfully on host {}", @@ -920,7 +913,7 @@ fn main() { } Err(e) => { println!("Error: {:?}", e); - let _ = notify_slack( + notify_slack( &config, &format!( "Disk-Manager Errored out on host {} with {:?}", diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 681bda7..ec9c689 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -292,8 +292,7 @@ pub struct DBConfig { /// get message(s) from the socket pub fn get_messages(s: &Socket) -> BynarResult> { - let msg = s.recv_bytes(0)?; - let id = msg.clone(); + let id = s.recv_bytes(0)?; if s.get_rcvmore()? { return Ok(s.recv_bytes(0)?); } diff --git a/src/main.rs b/src/main.rs index 6005ff5..6f8715a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -77,7 +77,7 @@ impl DiskOp { // create a message map to handle list of disk-manager requests fn create_msg_map() -> BynarResult>>> { // List out currently mounted block_devices - let mut devices: Vec = block_utils::get_block_devices()? + let devices: Vec = block_utils::get_block_devices()? 
.into_iter() .filter(|b| { !(if let Some(p) = b.as_path().file_name() { @@ -197,7 +197,7 @@ fn get_map_op( } } } - return Ok(None); + Ok(None) } // replace the DiskOp associated with the input dev_path None and return the previous DiskOp @@ -227,10 +227,10 @@ fn remove_map_op( } } } - return Err(BynarError::from(format!( + Err(BynarError::from(format!( "Path {} is not in the message map", dev_path.display() - ))); + ))) } // get the hashmap associated with a diskpath from the op map @@ -250,7 +250,10 @@ fn get_disk_map_op( return Ok(disk.clone()); } } - Err(BynarError::from(format!("Path is not a disk in the map"))) + Err(BynarError::from(format!( + "Path {} is not a disk in the map", + dev_path.display() + ))) } fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { @@ -333,6 +336,7 @@ fn add_disk_to_description( fn check_for_failed_disks( config: &ConfigSettings, + message_map: &mut HashMap>>, message_queue: &mut VecDeque<(Operation, Option, Option)>, host_info: &Host, pool: &Pool, @@ -352,6 +356,42 @@ fn check_for_failed_disks( )); info!("Checking all drives"); + let all_states = test_disk::check_all_disks(&host_info, pool, host_mapping)?; + // separate the states into Ok and Errors + let usable_states: Vec<_> = all_states + .iter() + .filter_map(|s| match s { + Ok(s) => Some(s), + Err(_) => None, + }) + .collect(); + // list of all states that have error'd out for some reason, once we've run every usable state, + // error out with the list of errors + let errored_states: Vec<_> = all_states + .iter() + .filter_map(|s| match s { + Ok(_) => None, + Err(e) => Some(e), + }) + .collect(); + //filter all the disks that are in the WaitingForReplacement state and are not currently undergoing an operation + let replacing: Vec<_> = usable_states + .iter() + .filter(|state_machine| { + if state_machine.block_device.state == State::WaitingForReplacement { + //check hashmap of the device path == None, or OpType != SafeToRemove || Remove + match get_map_op(&message_map, &state_machine.block_device.dev_path).unwrap() { + Some(op) => { + //check if op_type == SafeToRemove || Remove + !(op.op_type == Op::SafeToRemove || op.op_type == Op::Remove) + } + None => true, + } + } else { + false + } + }) + .collect(); for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? 
{ match result { Ok(state_machine) => { @@ -756,10 +796,10 @@ fn handle_operation_result( "Unable to get current operation in the map for {}", path.display() ); - return Err(BynarError::from(format!( + Err(BynarError::from(format!( "Unable to get current operation in the map for {}", path.display() - ))); + ))) } _ => { //need to prep other stuff @@ -793,13 +833,7 @@ fn send_and_recieve( // check success outcome if val.get_outcome() == OpOutcome::Success && val.get_value() { //then ok to run Op::Remove - send_and_update( - s, - message_map, - client_id, - (mess, desc, op_id), - &path, - )?; + send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; } // safe-to-remove returned false or error'd so we should not remove but let manual handling // delete the remove request in this case (in otherwords, do nothing) @@ -810,13 +844,7 @@ fn send_and_recieve( "Previous request {:?} has finished, but hasn't been reset", disk_op.op_type ); - send_and_update( - s, - message_map, - client_id, - (mess, desc, op_id), - &path, - )?; + send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; } } else { // we haven't gotten response from previous request yet, push request to back of queue @@ -836,7 +864,7 @@ fn send_and_recieve( // NOTE: disks is not an option since list_disks is not a request that the main bynar program makes let mut message = helpers::get_messages(s)?; // skip empty initial message, and keep looping until no more messages from disk-manager - while message.len() > 0 { + while !message.is_empty() { // get message match get_message!(OpOutcomeResult, &message) { Ok(outcome) => { @@ -955,16 +983,13 @@ fn main() { .args(&["-p", &pid]) .output() .expect("Unable to open shell to run ps command"); - match output.status.code() { - Some(0) => { - let out = String::from_utf8_lossy(&output.stdout); - if out.contains("bynar") { - //skip - error!("There is already a running instance of bynar! Abort!"); - return; - } + if let Some(0) = output.status.code() { + let out = String::from_utf8_lossy(&output.stdout); + if out.contains("bynar") { + //skip + error!("There is already a running instance of bynar! Abort!"); + return; } - _ => {} } } let signals = Signals::new(&[ @@ -979,8 +1004,10 @@ fn main() { let outfile = format!("/var/log/{}", config.daemon_output); let errfile = format!("/var/log/{}", config.daemon_error); - let stdout = File::create(&outfile).expect(&format!("{} creation failed", outfile)); - let stderr = File::create(&errfile).expect(&format!("{} creation failed", errfile)); + let stdout = + File::create(&outfile).unwrap_or_else(|_| panic!("{} creation failed", outfile)); + let stderr = + File::create(&errfile).unwrap_or_else(|_| panic!("{} creation failed", errfile)); trace!("I'm Parent and My pid is {}", process::id()); @@ -1040,10 +1067,12 @@ fn main() { let dur = Duration::from_secs(time); let mut message_queue: VecDeque<(Operation, Option, Option)> = VecDeque::new(); + let mut message_map = create_msg_map().unwrap(); 'outer: loop { let now = Instant::now(); match check_for_failed_disks( &config, + &mut message_map, &mut message_queue, &host_info, &db_pool, @@ -1095,7 +1124,7 @@ fn main() { signal_hook::SIGHUP => { //Reload the config file debug!("Reload Config File"); - let _ = notify_slack( + notify_slack( &config, &format!("Reload config file on {}", host_info.hostname), ) @@ -1107,7 +1136,7 @@ fn main() { config_dir.join("bynar.json").display(), e ); - let _ = notify_slack( + notify_slack( &config, &format!( "Failed to load config file {}. 
error: {}", @@ -1140,7 +1169,7 @@ fn main() { } } debug!("Bynar exited successfully"); - let _ = notify_slack( + notify_slack( &config, &format!("Bynar on host {} has stopped", host_info.hostname), ) diff --git a/src/test_disk.rs b/src/test_disk.rs index fbb4d66..34fc96c 100644 --- a/src/test_disk.rs +++ b/src/test_disk.rs @@ -1412,7 +1412,7 @@ pub fn check_all_disks( Ok(_) => {} Err(e) => { error!("Add or Update Operation Error: {:?}", e); - return Err(BynarError::from(e)); + return Err(e); } }; // store the operation_id in BlockDevice struct From f44493609036708313cf71ab1c0f5a02ecfaba73 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 3 Feb 2020 17:01:26 -0500 Subject: [PATCH 18/76] Filter disks for WaitingForReplacement and not in-progress --- src/main.rs | 47 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6f8715a..04ae5c0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -98,20 +98,23 @@ fn create_msg_map() -> BynarResult> = HashMap::new(); disk_map.insert(device.to_path_buf(), None); // check if partition parent is device - for partition in &partitions { - if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { - if &disk == device { - disk_map.insert(partition.to_path_buf(), None); - } - } - } + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); map.insert(device.to_path_buf(), disk_map); - } + }); Ok(map) } @@ -197,7 +200,7 @@ fn get_map_op( } } } - Ok(None) + Ok(None) } // replace the DiskOp associated with the input dev_path None and return the previous DiskOp @@ -383,7 +386,19 @@ fn check_for_failed_disks( match get_map_op(&message_map, &state_machine.block_device.dev_path).unwrap() { Some(op) => { //check if op_type == SafeToRemove || Remove - !(op.op_type == Op::SafeToRemove || op.op_type == Op::Remove) + if !(op.op_type == Op::SafeToRemove || op.op_type == Op::Remove) { + // check if in_progress + info!("Connecting to database to check if disk is in progress"); + in_progress::is_hardware_waiting_repair( + pool, + host_mapping.storage_detail_id, + &state_machine.block_device.dev_path.to_string_lossy(), + None, + ) + .unwrap() + } else { + false + } } None => true, } @@ -392,6 +407,14 @@ fn check_for_failed_disks( } }) .collect(); + //filter Fail disks in seperate vec and soft-error those + /*replacing.iter().for_each( + |disk| { + //add safeToRemove + Remove request to message_queue, checking if its already in first + // create Operation, description, and get the op_id + let mess: (Operation, Option, Option) = (Operation::new().set_Op_type(Op::SafeToRemove), Some(description)) + } + );*/ for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? { match result { Ok(state_machine) => { @@ -399,6 +422,7 @@ fn check_for_failed_disks( "Disk status: /dev/{} {:?}", state_machine.block_device.device.name, state_machine ); + // just use state_machine.block_device.dev_path??? 
let mut dev_path = PathBuf::from("/dev"); let dev_name = &state_machine.block_device.device.name; dev_path.push(&dev_name); @@ -510,6 +534,7 @@ fn check_for_failed_disks( } Some(i) => i, }; + // update operation detials in DB let mut operation_detail = OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); From 479ade6bbd54dcf06d4af70476c0b08a1d7e38c0 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 5 Feb 2020 16:40:48 -0500 Subject: [PATCH 19/76] Switched to STREAM/STREAM system --- Cargo.lock | 32 ++++++++++++------------ src/backend/ceph.rs | 8 ++---- src/client.rs | 22 +++++++++++------ src/disk_manager.rs | 10 ++++++-- src/lib/lib.rs | 29 ++++++++++++++++------ src/main.rs | 60 +++++++++++++++++++++++++++++---------------- 6 files changed, 101 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f826b5d..724e704 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,13 +198,13 @@ dependencies = [ [[package]] name = "block-utils" version = "0.6.2" -source = "git+https://github.com/mzhong1/block-utils.git#518f7ddae349a925ebf59b4c5601fa4c89a1f1ca" +source = "git+https://github.com/mzhong1/block-utils.git#79c534b33270a07035709c58dae07c3b3b609ad3" dependencies = [ "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "nom 4.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "shellscript 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "udev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -265,7 +265,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "signal-hook 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "simplelog 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "slack-hook 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -332,7 +332,7 @@ dependencies = [ "nom 5.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -416,7 +416,7 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "publicsuffix 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "try_from 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -750,7 +750,7 @@ dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "miniz_oxide 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -851,7 +851,7 @@ dependencies = [ "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde-xml-rs 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "unix_socket 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -865,7 +865,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -910,7 +910,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1181,7 +1181,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1303,7 +1303,7 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2099,7 +2099,7 @@ dependencies = [ "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2250,7 +2250,7 @@ dependencies = [ [[package]] name = "serde_json" -version = 
"1.0.45" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2339,7 +2339,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3243,7 +3243,7 @@ dependencies = [ "checksum metadeps 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "73b122901b3a675fac8cecf68dcb2f0d3036193bc861d1ac0e1c337f7d5254c2" "checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" "checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" -"checksum miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6f3f74f726ae935c3f514300cc6773a0c9492abc5e972d42ba0c0ebb88757625" +"checksum miniz_oxide 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5" "checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" @@ -3347,7 +3347,7 @@ dependencies = [ "checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" "checksum serde-xml-rs 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0c06881f4313eec67d4ecfcd8e14339f6042cfc0de4b1bd3ceae74c29d597f68" "checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" -"checksum serde_json 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)" = "eab8f15f15d6c41a154c1b128a22f2dfabe350ef53c40953d84e36155c91192b" +"checksum serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)" = "21b01d7f0288608a01dca632cf1df859df6fd6ffa885300fc275ce2ba6221953" "checksum serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "642dd69105886af2efd227f75a520ec9b44a820d65bc133a9131f7d229fd165a" "checksum sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9eb6be24e4c23a84d7184280d2722f7f2731fcdd4a9d886efbfe4413e4847ea0" "checksum shellscript 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "15c0d07fa97f8d209609a1a1549bd886bd907f520f75e1c785783167a66d20c4" diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 9d0ef80..423c4bf 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -249,13 +249,9 @@ fn validate_config(config: &mut CephConfig, cluster_handle: &Rados) -> BynarResu // get the OSDConfig for a given input osd path if one exists fn get_osd_config_by_path(config: &CephConfig, 
dev_path: &Path) -> BynarResult { let path = dev_path.to_string_lossy().to_string(); - let parent = match block_utils::get_parent_devpath_from_path(dev_path) { - Ok(Some(p)) => p.to_string_lossy().to_string(), - _ => path[..path.len() - 1].to_string(), - }; for osdconfig in &config.osd_config { - // if dev_path == path given, or osdconfig is NOT an lvm and the path given is the parent path - if osdconfig.dev_path == path || (!osdconfig.is_lvm && osdconfig.dev_path == parent) { + // if dev_path == path given + if osdconfig.dev_path == path { return Ok(osdconfig.clone()); } } diff --git a/src/client.rs b/src/client.rs index da2aa03..d29f5c9 100644 --- a/src/client.rs +++ b/src/client.rs @@ -43,20 +43,27 @@ fn add_disk( } fn list_disks(s: &Socket, client_id: Vec) -> BynarResult> { - helpers::list_disks_request(s, client_id)?; //loop until socket is readable, then get the response + let mut sent = false; loop { let events = poll_events!(s, continue); + //check if writable before sending request + if events.contains(zmq::PollEvents::POLLOUT) && !sent { + helpers::list_disks_request(s, client_id.clone())?; + sent = true; + } // got response if events.contains(zmq::PollEvents::POLLIN) { let message = helpers::get_messages(s)?; - let disks = get_message!(Disks, &message)?; - let mut d: Vec = Vec::new(); - for disk in disks.get_disk() { - d.push(disk.clone()); + if !message.is_empty() { + let disks = get_message!(Disks, &message)?; + let mut d: Vec = Vec::new(); + for disk in disks.get_disk() { + d.push(disk.clone()); + } + println!("disk list: {:?}", d); + return Ok(d); } - println!("disk list: {:?}", d); - return Ok(d); } } } @@ -312,6 +319,7 @@ fn main() { } }; let client_id: Vec = s.get_identity().unwrap(); + debug!("Client ID {:?}, len {}", client_id, client_id.len()); if let Some(ref matches) = matches.subcommand_matches("add") { handle_add_disk(&s, matches, client_id.clone()); } diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 6b973a2..37ae572 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -189,7 +189,7 @@ fn listen( ) -> BynarResult<()> { debug!("Starting zmq listener with version({:?})", zmq::version()); let context = zmq::Context::new(); - let responder = context.socket(zmq::STREAM)?; + let responder = context.socket(zmq::SocketType::STREAM)?; debug!("Listening on tcp://{}:5555", listen_address); // Fail to start if this fails @@ -228,11 +228,15 @@ fn listen( let mut msg = responder.recv_bytes(0)?; debug!("Got msg len: {}", msg.len()); trace!("Parsing msg {:?} as hex", msg); + if msg.len() == 0 { + continue; + } while !msg.is_empty() { - let operation = match parse_from_bytes::(&msg) { + let operation = match parse_from_bytes::(&msg.clone()) { Ok(bytes) => bytes, Err(e) => { error!("Failed to parse_from_bytes {:?}. 
Ignoring request", e); + break 'outer Ok(()); continue; } }; @@ -451,6 +455,8 @@ fn listen( } } } + } else { + std::thread::sleep(Duration::from_millis(10)); } } })?; diff --git a/src/lib/lib.rs b/src/lib/lib.rs index ec9c689..66fa33f 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -34,7 +34,7 @@ where pub fn connect(host: &str, port: &str, server_publickey: &[u8]) -> BynarResult { debug!("Starting zmq sender with version({:?})", zmq::version()); let context = zmq::Context::new(); - let requester = context.socket(zmq::DEALER)?; + let requester = context.socket(zmq::STREAM)?; let client_keypair = zmq::CurveKeyPair::new()?; debug!("Created new keypair"); requester.set_curve_serverkey(server_publickey)?; @@ -44,6 +44,11 @@ pub fn connect(host: &str, port: &str, server_publickey: &[u8]) -> BynarResult BynarResu pub fn request(s: &Socket, op: Operation, client_id: Vec) -> BynarResult<()> { //send the id first s.send(&client_id, zmq::SNDMORE)?; - let encoded = op.write_to_bytes().unwrap(); + let encoded = op.write_to_bytes()?; debug!("Sending message"); s.send(&encoded, 0)?; Ok(()) @@ -86,7 +91,7 @@ pub fn add_disk_request( o.set_osd_id(id); } - let encoded = o.write_to_bytes().unwrap(); + let encoded = o.write_to_bytes()?; debug!("Sending message"); s.send(&encoded, 0)?; Ok(()) @@ -133,18 +138,24 @@ pub fn check_disk_request(s: &mut Socket) -> Result { /// send a list disk request to the disk-manager pub fn list_disks_request(s: &Socket, client_id: Vec) -> BynarResult<()> { //BynarResult> { + debug!("Printing ID {:?}", client_id); let mut o = Operation::new(); debug!("Creating list operation request"); //send the id first - s.send(&client_id, zmq::SNDMORE)?; o.set_Op_type(Op::List); debug!("Encoding as hex"); let encoded = o.write_to_bytes()?; - debug!("{:?}", encoded); - + debug!("Encoded value {:?}", encoded); debug!("Sending message"); - s.send(&encoded, 0)?; + + s.send(client_id, zmq::SNDMORE)?; + s.send(encoded, 0)?; + //(&[client_id, encoded], 0)?; + + //s.send(&client_id, zmq::SNDMORE)?; + //s.send("Send another message", zmq::SNDMORE)?; + //s.send(encoded, 0)?; Ok(()) /*debug!("Waiting for response"); let disks_response = s.recv_bytes(0)?; @@ -204,7 +215,7 @@ pub fn remove_disk_request( let encoded = o.write_to_bytes()?; debug!("Sending message"); - s.send(&encoded, 0)?; + s.send(encoded, 0)?; Ok(()) /*debug!("Waiting for response"); let remove_response = s.recv_bytes(0)?; @@ -334,12 +345,14 @@ macro_rules! 
make_op { /// get the list of JIRA tickets from disk-manager pub fn get_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { + debug!("Printing ID {:?}", client_id); let mut o = Operation::new(); //send the id first s.send(&client_id, zmq::SNDMORE)?; debug!("calling get_jira_tickets "); o.set_Op_type(Op::GetCreatedTickets); let encoded = o.write_to_bytes()?; + debug!("encoded {:?}", encoded); debug!("Sending message in get_jira_tickets"); s.send(&encoded, 0)?; Ok(()) diff --git a/src/main.rs b/src/main.rs index 04ae5c0..8943998 100644 --- a/src/main.rs +++ b/src/main.rs @@ -385,20 +385,17 @@ fn check_for_failed_disks( //check hashmap of the device path == None, or OpType != SafeToRemove || Remove match get_map_op(&message_map, &state_machine.block_device.dev_path).unwrap() { Some(op) => { + // check if in_progress + info!("Connecting to database to check if disk is in progress"); + let in_progress = in_progress::is_hardware_waiting_repair( + pool, + host_mapping.storage_detail_id, + &state_machine.block_device.dev_path.to_string_lossy(), + None, + ) + .unwrap(); //check if op_type == SafeToRemove || Remove - if !(op.op_type == Op::SafeToRemove || op.op_type == Op::Remove) { - // check if in_progress - info!("Connecting to database to check if disk is in progress"); - in_progress::is_hardware_waiting_repair( - pool, - host_mapping.storage_detail_id, - &state_machine.block_device.dev_path.to_string_lossy(), - None, - ) - .unwrap() - } else { - false - } + !(op.op_type == Op::SafeToRemove || op.op_type == Op::Remove || in_progress) } None => true, } @@ -407,14 +404,35 @@ fn check_for_failed_disks( } }) .collect(); - //filter Fail disks in seperate vec and soft-error those - /*replacing.iter().for_each( - |disk| { - //add safeToRemove + Remove request to message_queue, checking if its already in first - // create Operation, description, and get the op_id - let mess: (Operation, Option, Option) = (Operation::new().set_Op_type(Op::SafeToRemove), Some(description)) - } - );*/ + //filter Fail disks in seperate vec and soft-error those at the end before checking the errored_states + let failed: Vec<_> = usable_states + .iter() + .filter(|state_machine| state_machine.block_device.state == State::Fail) + .collect(); + + replacing.iter().for_each(|state_machine| { + //add safeToRemove + Remove request to message_queue, checking if its already in first + // create Operation, description, and get the op_id + let mut desc = description.clone(); + add_disk_to_description( + &mut desc, + &state_machine.block_device.dev_path, + &state_machine, + ); + let op_id = match state_machine.block_device.operation_id { + None => { + error!( + "Operation not recorded for {}", + state_machine.block_device.dev_path.display() + ); + 0 + } + Some(i) => i, + }; + let mut op = Operation::new(); + op.set_Op_type(Op::SafeToRemove); + let mess: (Operation, Option, Option) = (op, Some(desc), Some(op_id)); + }); for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? 
{ match result { Ok(state_machine) => { From 5e50318ab896fd789a510cd75fd19fc970520e49 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 5 Feb 2020 17:00:10 -0500 Subject: [PATCH 20/76] Fix bynar-client to use new socket system --- src/client.rs | 77 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/src/client.rs b/src/client.rs index d29f5c9..745000d 100644 --- a/src/client.rs +++ b/src/client.rs @@ -29,15 +29,22 @@ fn add_disk( client_id: Vec, simulate: bool, ) -> BynarResult { - helpers::add_disk_request(s, path, id, client_id, simulate)?; + let mut sent = false; //loop until socket is readable, then get the response loop { let events = poll_events!(s, continue); + //check if writable before sending request + if events.contains(zmq::PollEvents::POLLOUT) && !sent { + helpers::add_disk_request(s, path, id, client_id.clone(), simulate)?; + sent = true; + } // got response if events.contains(zmq::PollEvents::POLLIN) { let message = helpers::get_messages(s)?; - let op_result = get_message!(OpOutcomeResult, &message)?; - get_op_result!(op_result, add_disk); + if !message.is_empty() { + let op_result = get_message!(OpOutcomeResult, &message)?; + get_op_result!(op_result, add_disk); + } } } } @@ -75,16 +82,23 @@ fn remove_disk( client_id: Vec, simulate: bool, ) -> BynarResult { - helpers::remove_disk_request(s, path, id, client_id, simulate)?; + let mut sent = false; //loop until socket is readable, then get the response loop { let events = poll_events!(s, continue); + //check if writable before sending request + if events.contains(zmq::PollEvents::POLLOUT) && !sent { + helpers::remove_disk_request(s, path, id, client_id.clone(), simulate)?; + sent = true; + } // got response if events.contains(zmq::PollEvents::POLLIN) { let message = helpers::get_messages(s)?; - let op_result = get_message!(OpOutcomeResult, &message)?; - get_op_result!(op_result, remove_disk); + if !message.is_empty() { + let op_result = get_message!(OpOutcomeResult, &message)?; + get_op_result!(op_result, remove_disk); + } } } } @@ -126,35 +140,42 @@ fn handle_list_disks(s: &Socket, client_id: Vec) { fn handle_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { trace!("handle_jira_tickets called"); - helpers::get_jira_tickets(s, client_id)?; + let mut sent = false; //loop until socket is readable, then get the response loop { let events = poll_events!(s, continue); + //check if writable before sending request + if events.contains(zmq::PollEvents::POLLOUT) && !sent { + helpers::get_jira_tickets(s, client_id.clone())?; + sent = true; + } // got response if events.contains(zmq::PollEvents::POLLIN) { let message = helpers::get_messages(s)?; - let tickets = get_message!(OpJiraTicketsResult, &message)?; - match tickets.get_result() { - ResultType::OK => { - debug!("got tickets successfully"); - let proto_jira = tickets.get_tickets(); - let mut _jira: Vec = Vec::new(); - for JiraInfo in proto_jira { - debug!("get_ticket_id: {}", JiraInfo.get_ticket_id()); - debug!("get_server_name: {}", JiraInfo.get_server_name()); + if !message.is_empty() { + let tickets = get_message!(OpJiraTicketsResult, &message)?; + match tickets.get_result() { + ResultType::OK => { + debug!("got tickets successfully"); + let proto_jira = tickets.get_tickets(); + let mut _jira: Vec = Vec::new(); + for JiraInfo in proto_jira { + debug!("get_ticket_id: {}", JiraInfo.get_ticket_id()); + debug!("get_server_name: {}", JiraInfo.get_server_name()); + } + return Ok(()); } - return Ok(()); - } - 
ResultType::ERR => { - if tickets.has_error_msg() { - let msg = tickets.get_error_msg(); - error!("get jira tickets failed : {}", msg); - return Err(BynarError::from(tickets.get_error_msg())); - } else { - error!("Get jira tickets failed but error_msg not set"); - return Err(BynarError::from( - "Get jira tickets failed but error_msg not set", - )); + ResultType::ERR => { + if tickets.has_error_msg() { + let msg = tickets.get_error_msg(); + error!("get jira tickets failed : {}", msg); + return Err(BynarError::from(tickets.get_error_msg())); + } else { + error!("Get jira tickets failed but error_msg not set"); + return Err(BynarError::from( + "Get jira tickets failed but error_msg not set", + )); + } } } } From 51e604160ac01b4e42233e6683fb211f224ecc3b Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 6 Feb 2020 11:39:25 -0500 Subject: [PATCH 21/76] parse_from_bytes grabs messages from the end of the byte vector given, added function to parse message properly and drain message bytes properly --- src/backend/ceph.rs | 22 +++++++++++++++------- src/client.rs | 8 +++++--- src/disk_manager.rs | 3 ++- src/lib/lib.rs | 29 +++++++++++++++++++++++++++++ 4 files changed, 51 insertions(+), 11 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 423c4bf..e69a82a 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -1460,13 +1460,21 @@ impl Backend for CephBackend { //check if manual bluestore let osd_config = get_osd_config_by_path(&self.config, device)?; let osd_id = if !osd_config.is_lvm { - let mut part2: String = device.to_string_lossy().to_string(); - part2.truncate(part2.len() - 1); - part2.push_str("2"); - let part2 = Path::new(&part2); - debug!("CHECKING PATH {}", part2.display()); - //get the osd id - get_osd_id_from_device(&self.cluster_handle, part2)? + if let Some(e) = block_utils::get_parent_devpath_from_path(device)? { + let mut part2: String = device.to_string_lossy().to_string(); + part2.truncate(part2.len() - 1); + part2.push_str("2"); + let part2 = Path::new(&part2); + debug!("CHECKING PATH {}", part2.display()); + //get the osd id + get_osd_id_from_device(&self.cluster_handle, part2)? + } else { + let mut part2: String = device.to_string_lossy().to_string(); + part2.push_str("2"); + let part2 = Path::new(&part2); + debug!("CHECKING PATH {}", part2.display()); + get_osd_id_from_device(&self.cluster_handle, part2)? + } } else { //get the osd id get_osd_id_from_device(&self.cluster_handle, device)? 
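// Illustrative sketch (std only) of the disk_manager fix described above: since
// parse_from_bytes pulled the Operation from the tail of the byte vector, the
// consumed bytes are drained from the end of `msg`, not the front, so earlier
// frames stay intact.
fn drain_tail(msg: &mut Vec<u8>, parsed_len: usize) {
    let start = msg.len().saturating_sub(parsed_len);
    msg.drain(start..);
}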
diff --git a/src/client.rs b/src/client.rs index 745000d..a11d4fa 100644 --- a/src/client.rs +++ b/src/client.rs @@ -158,10 +158,12 @@ fn handle_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { ResultType::OK => { debug!("got tickets successfully"); let proto_jira = tickets.get_tickets(); - let mut _jira: Vec = Vec::new(); for JiraInfo in proto_jira { - debug!("get_ticket_id: {}", JiraInfo.get_ticket_id()); - debug!("get_server_name: {}", JiraInfo.get_server_name()); + println!("get_ticket_id: {}", JiraInfo.get_ticket_id()); + println!("get_server_name: {}", JiraInfo.get_server_name()); + } + if proto_jira.is_empty() { + println!("No outstanding tickets"); } return Ok(()); } diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 37ae572..027fd65 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -241,7 +241,8 @@ fn listen( } }; let client_id = client_id.clone(); - msg.drain(0..operation.write_to_bytes()?.len()); + let size = operation.write_to_bytes()?.len(); + msg.drain((msg.len() - size)..msg.len()); let send_res = send_res.clone(); let send_disk = send_disk.clone(); let send_ticket = send_ticket.clone(); diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 66fa33f..5c3c8fb 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -343,6 +343,35 @@ macro_rules! make_op { }}; } +#[macro_export] +/// get the first instance of a message type +macro_rules! get_first_instance { + ($message:expr, mess_type:ty) => { + let mut copy = message.clone(); + while !copy.is_empty() { + match parse_from_bytes::(©) { + Ok(mess) => { + let bytes = mess.write_to_bytes().unwrap(); + let size = bytes.len(); + //println!("compare {:?} with {:?}", bytes, copy); + if message.starts_with(&bytes) { + message.drain(0..size); + return Some(mess); + } + } + // we can't error out early since + // the tag/wire bits are at the end and we can't tell + // how long a message might be or what kind(s) are in the vec + Err(_) => {} + } + // parse from bytes grabs from the end of the byte array + //so, remove half the length of bytes from the end of the message and try again + copy.drain((copy.len() - 1)..copy.len()); + } + None + }; +} + /// get the list of JIRA tickets from disk-manager pub fn get_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { debug!("Printing ID {:?}", client_id); From 08bce43cd0b76f347cf106223ceda12a1f81142e Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 6 Feb 2020 11:46:53 -0500 Subject: [PATCH 22/76] change remove/safeto-remove to be stricter with config/non-lvm bluestore removals --- src/backend/ceph.rs | 11 ++++++++--- src/main.rs | 11 ++++++----- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index e69a82a..dff349e 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -1407,10 +1407,15 @@ impl Backend for CephBackend { let osd_config = get_osd_config_by_path(&self.config, device)?; let path_check; let mut part2: String = device.to_string_lossy().to_string(); - part2.truncate(part2.len() - 1); - part2.push_str("2"); if !osd_config.is_lvm { - path_check = Path::new(&part2); + if let Some(e) = block_utils::get_parent_devpath_from_path(device)? 
{ + part2.truncate(part2.len() - 1); + part2.push_str("2"); + path_check = Path::new(&part2); + } else { + part2.push_str("2"); + path_check = Path::new(&part2); + } } else { path_check = device; } diff --git a/src/main.rs b/src/main.rs index 8943998..57f5d9c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -858,7 +858,7 @@ fn send_and_recieve( message_queue: &mut VecDeque<(Operation, Option, Option)>, client_id: Vec, ) -> BynarResult<()> { - // Note, all client sent messages are Operation, while return values can be OpJiraTicketResult, Disks, or OpOutcomeResult + // Note, all client sent messages are Operation, while return values of type OpOutcomeResult let events = poll_events!(s, return Ok(())); //check sendable first if events.contains(zmq::PollEvents::POLLOUT) { @@ -909,14 +909,15 @@ fn send_and_recieve( // skip empty initial message, and keep looping until no more messages from disk-manager while !message.is_empty() { // get message - match get_message!(OpOutcomeResult, &message) { + match get_first_instance!(&mut message, OpOutcomeResult) { Ok(outcome) => { - message.drain(0..outcome.write_to_bytes()?.len()); + //message.drain(0..outcome.write_to_bytes()?.len()); } Err(_) => { // must be tickets, since list_disks is never requested by bynar main program - let tickets = get_message!(OpJiraTicketsResult, &message)?; - message.drain(0..tickets.write_to_bytes()?.len()); + //let tickets = get_first_instance!(&mut message, OpJiraTicketsResult)?; + //message.drain(0..tickets.write_to_bytes()?.len()); + //Actually, this is a problem since Bynar only sends Add/SafeToRemove/Remove requests } } } From 6e7b3a51dcf31bb2c2cf6906d668ffa43a234a55 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 6 Feb 2020 15:01:32 -0500 Subject: [PATCH 23/76] Fix macro use for parsing opresults --- src/lib/lib.rs | 19 +++++++++++++------ src/main.rs | 8 ++++---- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 5c3c8fb..148f3c6 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -346,16 +346,19 @@ macro_rules! make_op { #[macro_export] /// get the first instance of a message type macro_rules! get_first_instance { - ($message:expr, mess_type:ty) => { - let mut copy = message.clone(); + ($message:expr, $mess_type:ty) => {{ + let mut copy = $message.clone(); + if copy.is_empty() { + return None; + } while !copy.is_empty() { - match parse_from_bytes::(©) { + match parse_from_bytes::<$mess_type>(©) { Ok(mess) => { let bytes = mess.write_to_bytes().unwrap(); let size = bytes.len(); //println!("compare {:?} with {:?}", bytes, copy); - if message.starts_with(&bytes) { - message.drain(0..size); + if $message.starts_with(&bytes) { + $message.drain(0..size); return Some(mess); } } @@ -369,7 +372,11 @@ macro_rules! 
get_first_instance { copy.drain((copy.len() - 1)..copy.len()); } None - }; + }}; +} + +pub fn get_first_op_result(message: &mut Vec) -> Option { + get_first_instance!(message, OpOutcomeResult) } /// get the list of JIRA tickets from disk-manager diff --git a/src/main.rs b/src/main.rs index 57f5d9c..87b6a65 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,7 +30,7 @@ use crate::test_disk::{State, StateMachine}; use api::service::{Op, OpJiraTicketsResult, OpOutcome, OpOutcomeResult, Operation, ResultType}; use clap::{crate_authors, crate_version, App, Arg}; use daemonize::Daemonize; -use helpers::{error::*, host_information::Host, ConfigSettings}; +use helpers::{error::*, get_first_instance, host_information::Host, ConfigSettings}; use libc::c_int; use log::{debug, error, info, trace, warn}; use protobuf::parse_from_bytes; @@ -909,11 +909,11 @@ fn send_and_recieve( // skip empty initial message, and keep looping until no more messages from disk-manager while !message.is_empty() { // get message - match get_first_instance!(&mut message, OpOutcomeResult) { - Ok(outcome) => { + match helpers::get_first_op_result(&mut message) { + Some(outcome) => { //message.drain(0..outcome.write_to_bytes()?.len()); } - Err(_) => { + None => { // must be tickets, since list_disks is never requested by bynar main program //let tickets = get_first_instance!(&mut message, OpJiraTicketsResult)?; //message.drain(0..tickets.write_to_bytes()?.len()); From 3528e8c649e314de6ed33806a6bbeb80c5237008 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 7 Feb 2020 09:42:46 -0500 Subject: [PATCH 24/76] filter state machines for the disks that need replacement + add in the partitions on the disks to be replaced to the list --- src/main.rs | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 87b6a65..bcf41e0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -378,7 +378,7 @@ fn check_for_failed_disks( }) .collect(); //filter all the disks that are in the WaitingForReplacement state and are not currently undergoing an operation - let replacing: Vec<_> = usable_states + let mut replacing: Vec<_> = usable_states .iter() .filter(|state_machine| { if state_machine.block_device.state == State::WaitingForReplacement { @@ -404,6 +404,24 @@ fn check_for_failed_disks( } }) .collect(); + // add the partition state machines? to the replacing list + let mut add_replacing = Vec::new(); + for state_machine in &replacing { + let disks = get_disk_map_op(message_map, &state_machine.block_device.dev_path)?; + // uh, get list of keys in disks and filter usable list for keypath? 
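// Illustrative sketch (std only) of the logic behind the corrected
// get_first_instance! macro: shrink a copy of the buffer from the back until a
// parse succeeds, confirm the re-serialized bytes sit at the front of the real
// buffer, then drain exactly that many bytes. `try_parse` stands in for the
// protobuf parse_from_bytes / write_to_bytes round trip.
fn take_first<T>(
    message: &mut Vec<u8>,
    try_parse: impl Fn(&[u8]) -> Option<(T, Vec<u8>)>,
) -> Option<T> {
    let mut copy = message.clone();
    while !copy.is_empty() {
        if let Some((parsed, bytes)) = try_parse(&copy) {
            if message.starts_with(&bytes) {
                message.drain(0..bytes.len());
                return Some(parsed);
            }
        }
        // parse failed or matched a later message; drop one byte off the end
        copy.truncate(copy.len() - 1);
    }
    None
}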
+ let mut add: Vec<_> = usable_states.iter().filter(|state_machine| { + disks.contains_key(&state_machine.block_device.dev_path) + }).collect(); + add_replacing.append(&mut add); + } + //combine with replacing, then do sort_unstable_by and dedup_rm + replacing.append(&mut add_replacing); + replacing.sort_unstable_by(|a, b| { + a.block_device.dev_path.partial_cmp(&b.block_device.dev_path).unwrap() + }); + replacing.dedup_by(|a, b| { + a.block_device.dev_path.eq(&b.block_device.dev_path) + }); //filter Fail disks in seperate vec and soft-error those at the end before checking the errored_states let failed: Vec<_> = usable_states .iter() @@ -429,8 +447,10 @@ fn check_for_failed_disks( } Some(i) => i, }; - let mut op = Operation::new(); - op.set_Op_type(Op::SafeToRemove); + let mut op = helpers::make_op!( + SafeToRemove, + format!("{}", state_machine.block_device.dev_path.display()) + ); let mess: (Operation, Option, Option) = (op, Some(desc), Some(op_id)); }); for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? { From 91ec3b88e8dd298d1409021f332f80688de47b0a Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 7 Feb 2020 09:47:55 -0500 Subject: [PATCH 25/76] Push SafeToRemove + Remove to queue --- src/main.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index bcf41e0..a6eba9a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -451,7 +451,16 @@ fn check_for_failed_disks( SafeToRemove, format!("{}", state_machine.block_device.dev_path.display()) ); - let mess: (Operation, Option, Option) = (op, Some(desc), Some(op_id)); + let mess: (Operation, Option, Option) = (op, Some(desc.clone()), Some(op_id)); + let mut op2 = helpers::make_op!( + Remove, + format!("{}", state_machine.block_device.dev_path.display()) + ); + let mess2: (Operation, Option, Option) = (op2, Some(desc), Some(op_id)); + if !message_queue.contains(&mess) && !message_queue.contains(&mess2) { + message_queue.push_back(mess); + message_queue.push_back(mess2); + } }); for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? { match result { From c9936c211b21ac5ead48526ee93d7230db52863d Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 7 Feb 2020 11:44:54 -0500 Subject: [PATCH 26/76] Error's cannot be cloned, so change check_for_failed_disk to error out immediately if check_all_disks failed --- src/main.rs | 57 +++++++++++++++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/src/main.rs b/src/main.rs index a6eba9a..022cce8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -359,24 +359,21 @@ fn check_for_failed_disks( )); info!("Checking all drives"); - let all_states = test_disk::check_all_disks(&host_info, pool, host_mapping)?; + let all_states: BynarResult> = + test_disk::check_all_disks(&host_info, pool, host_mapping)? 
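// Illustrative sketch (std only): the SafeToRemove and Remove requests are
// queued as a pair, and only when neither is already pending for that disk.
// `Msg` stands in for (Operation, Option<String>, Option<u32>).
use std::collections::VecDeque;

type Msg = (String, Option<String>, Option<u32>);

fn queue_replacement(queue: &mut VecDeque<Msg>, safe_to_remove: Msg, remove: Msg) {
    if !queue.contains(&safe_to_remove) && !queue.contains(&remove) {
        queue.push_back(safe_to_remove);
        queue.push_back(remove);
    }
}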
+ .into_iter() + .collect(); // separate the states into Ok and Errors - let usable_states: Vec<_> = all_states - .iter() - .filter_map(|s| match s { - Ok(s) => Some(s), - Err(_) => None, - }) - .collect(); - // list of all states that have error'd out for some reason, once we've run every usable state, - // error out with the list of errors - let errored_states: Vec<_> = all_states - .iter() - .filter_map(|s| match s { - Ok(_) => None, - Err(e) => Some(e), - }) - .collect(); + let usable_states: Vec<_> = match all_states { + Ok(s) => s, + Err(e) => { + error!("check_all_disks failed with error: {:?}", e); + return Err(BynarError::new(format!( + "check_all_disks failed with error: {:?}", + e + ))); + } + }; //filter all the disks that are in the WaitingForReplacement state and are not currently undergoing an operation let mut replacing: Vec<_> = usable_states .iter() @@ -409,19 +406,21 @@ fn check_for_failed_disks( for state_machine in &replacing { let disks = get_disk_map_op(message_map, &state_machine.block_device.dev_path)?; // uh, get list of keys in disks and filter usable list for keypath? - let mut add: Vec<_> = usable_states.iter().filter(|state_machine| { - disks.contains_key(&state_machine.block_device.dev_path) - }).collect(); + let mut add: Vec<_> = usable_states + .iter() + .filter(|state_machine| disks.contains_key(&state_machine.block_device.dev_path)) + .collect(); add_replacing.append(&mut add); } //combine with replacing, then do sort_unstable_by and dedup_rm replacing.append(&mut add_replacing); replacing.sort_unstable_by(|a, b| { - a.block_device.dev_path.partial_cmp(&b.block_device.dev_path).unwrap() - }); - replacing.dedup_by(|a, b| { - a.block_device.dev_path.eq(&b.block_device.dev_path) + a.block_device + .dev_path + .partial_cmp(&b.block_device.dev_path) + .unwrap() }); + replacing.dedup_by(|a, b| a.block_device.dev_path.eq(&b.block_device.dev_path)); //filter Fail disks in seperate vec and soft-error those at the end before checking the errored_states let failed: Vec<_> = usable_states .iter() @@ -462,7 +461,7 @@ fn check_for_failed_disks( message_queue.push_back(mess2); } }); - for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? { + /*for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? 
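// Illustrative sketch (std only): after the partition state machines are
// appended, duplicates are collapsed by sorting and deduplicating on dev_path.
// `Dev` stands in for the block-device state machine.
use std::path::PathBuf;

struct Dev {
    dev_path: PathBuf,
}

fn dedup_replacing(replacing: &mut Vec<Dev>) {
    replacing.sort_unstable_by(|a, b| a.dev_path.cmp(&b.dev_path));
    replacing.dedup_by(|a, b| a.dev_path == b.dev_path);
}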
{ match result { Ok(state_machine) => { info!( @@ -604,7 +603,13 @@ fn check_for_failed_disks( ))); } }; - } + }*/ + failed.iter().for_each(|state_machine| { + error!( + "Disk {} ended in a Fail state", + state_machine.block_device.dev_path.display() + ) + }); Ok(()) } From 397e153dc437481ad5bb0e0dfee30d8e573a1592 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 7 Feb 2020 15:10:10 -0500 Subject: [PATCH 27/76] handle add and remove return values --- src/main.rs | 121 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 113 insertions(+), 8 deletions(-) diff --git a/src/main.rs b/src/main.rs index 022cce8..098501b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -720,8 +720,6 @@ fn add_repaired_disks( storage_detail_id: u32, simulate: bool, ) -> BynarResult<()> { - //let public_key = get_public_key(&config, &host_info)?; - info!("Getting outstanding repair tickets"); let tickets = in_progress::get_outstanding_repair_tickets(&pool, storage_detail_id)?; debug!("outstanding tickets: {:?}", tickets); @@ -735,10 +733,6 @@ fn add_repaired_disks( format!("{}", Path::new(&ticket.device_path).display()), simulate ); - /*let mut o = Operation::new(); - o.set_Op_type(Op::Add); - o.set_disk(format!("{}", Path::new(&ticket.device_path).display())); - o.set_simulate(simulate);*/ let tid = Some(ticket.ticket_id.to_string()); message_queue.push_back((op, tid, None)); //CALL RPC @@ -834,6 +828,7 @@ fn handle_operation_result( message_map: &mut HashMap>>, pool: &Pool, op_res: OpOutcomeResult, + config: &ConfigSettings, ) -> BynarResult<()> { match op_res.get_result() { ResultType::OK => {} @@ -878,10 +873,120 @@ fn handle_operation_result( path.display() ))) } - _ => { - //need to prep other stuff + Op::SafeToRemove => { + // get the op from map, update it with outcome, handle errors as necessary (just store in map) + let dev_path = PathBuf::from(op_res.get_disk()); + if let Some(mut current_op) = get_map_op(message_map, &dev_path)? { + current_op.ret_val = Some(op_res); + //push op back into map + add_or_update_map_op(message_map, &dev_path, current_op)?; + } + //otherwise error.... + return Err(BynarError::from(format!( + "{} does not have a currently running operation!", + dev_path.display() + ))); + } + Op::Remove => { + //check if successful or not and send to slack + let dev_path = PathBuf::from(op_res.get_disk()); + match op_res.get_outcome() { + OpOutcome::Success => { + debug!("Disk {} removal successful", dev_path.display()); + let _ = notify_slack( + config, + &format!("Disk {} removal successful", dev_path.display()), + ); + } + OpOutcome::Skipped => { + debug!("Disk {} skipped, disk is not removable", dev_path.display()); + let _ = notify_slack( + config, + &format!("Disk {} skipped, disk is not removable", dev_path.display()), + ); + } + OpOutcome::SkipRepeat => { + debug!("Disk {} already removed, skipping.", dev_path.display()); + let _ = notify_slack( + config, + &format!("Disk {} already removed, skipping.", dev_path.display()), + ); + } + } + //update map + if let Some(mut current_op) = get_map_op(message_map, &dev_path)? 
{ + current_op.ret_val = Some(op_res); + //push op back into map + add_or_update_map_op(message_map, &dev_path, current_op)?; + } else { + return Err(BynarError::from(format!( + "{} does not have a currently running operation!", + dev_path.display() + ))); + } + // check if all ops in the disk have finished + let disk = get_disk_map_op(message_map, &dev_path)?; + let mut all_finished = true; + disk.iter().for_each(|(k, v)| { + //check if value finished + if let Some(val) = v { + if val.ret_val.is_none() { + all_finished = false; + } + } + }); + //if all finished open ticket+ notify slack + if all_finished { + // get the path of the disk + let path = + if let Some(parent) = block_utils::get_parent_devpath_from_path(&dev_path)? { + parent + } else { + dev_path + }; + // get the current op associated with the disk + if let Some(current_op) = get_map_op(message_map, &path)? { + let description = match current_op.description { + Some(d) => d, + None => { + return Err(BynarError::from(format!( + "Disk {} is missing a description", + path.display() + ))) + } + }; + let op_id = match current_op.operation_id { + None => { + error!("Operation not recorded for {}", path.display()); + 0 + } + Some(i) => i, + }; + //open JIRA ticket+ notify slack + debug!("Creating support ticket"); + let ticket_id = + create_support_ticket(config, "Bynar: Dead disk", &description)?; + debug!("Recording ticket id {} in database", ticket_id); + // update operation detials in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + } + return Err(BynarError::from(format!( + "Disk {} is missing the current operation", + path.display() + ))); + } Ok(()) } + _ => { + // these operations should never get called by Bynar + return Err(BynarError::from(format!( + "{} could not have run this operation!", + op_res.get_disk() + ))); + } } } From 04f690f90cec61db58c5b5cc0e1bc9579fec93c7 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 10 Feb 2020 09:04:25 -0500 Subject: [PATCH 28/76] Add new message handler to main function --- src/main.rs | 40 ++++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/src/main.rs b/src/main.rs index 098501b..f9cea27 100644 --- a/src/main.rs +++ b/src/main.rs @@ -995,6 +995,8 @@ fn send_and_recieve( s: &Socket, message_map: &mut HashMap>>, message_queue: &mut VecDeque<(Operation, Option, Option)>, + pool: &Pool, + config: &ConfigSettings, client_id: Vec, ) -> BynarResult<()> { // Note, all client sent messages are Operation, while return values of type OpOutcomeResult @@ -1051,12 +1053,14 @@ fn send_and_recieve( match helpers::get_first_op_result(&mut message) { Some(outcome) => { //message.drain(0..outcome.write_to_bytes()?.len()); + handle_operation_result(message_map, pool, outcome, config)?; } None => { - // must be tickets, since list_disks is never requested by bynar main program - //let tickets = get_first_instance!(&mut message, OpJiraTicketsResult)?; - //message.drain(0..tickets.write_to_bytes()?.len()); //Actually, this is a problem since Bynar only sends Add/SafeToRemove/Remove requests + error!("Message is not an OpOutcomeResult"); + return Err(BynarError::from(format!( + "Message received is not an OpOutcomeResult" + ))); } } } @@ -1175,6 +1179,7 @@ fn main() { } } } + let signals = Signals::new(&[ signal_hook::SIGHUP, signal_hook::SIGTERM, @@ -1247,7 +1252,18 @@ fn main() { d } }; - + 
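// Illustrative sketch (std only): a replacement ticket is only opened once every
// queued op on the disk and its partitions has reported back; map entries that
// never had an op queued (None) count as finished. `DiskOp` here is a stand-in
// holding only the returned result.
use std::collections::HashMap;
use std::path::PathBuf;

struct DiskOp {
    ret_val: Option<u32>, // stand-in for Option<OpOutcomeResult>
}

fn all_finished(disk: &HashMap<PathBuf, Option<DiskOp>>) -> bool {
    disk.values().all(|entry| match entry {
        Some(op) => op.ret_val.is_some(),
        None => true,
    })
}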
let public_key = get_public_key(&config, &host_info).unwrap(); + let s = match helpers::connect(&config.manager_host, + &config.manager_port.to_string(), + &public_key,) { + Ok(s) => s, + Err(e) => { + error!("Error connecting to socket: {:?}", e); + return; + } + }; + let client_id: Vec = s.get_identity().unwrap(); + debug!("Client ID {:?}, len {}", client_id, client_id.len()); let dur = Duration::from_secs(time); let mut message_queue: VecDeque<(Operation, Option, Option)> = VecDeque::new(); let mut message_map = create_msg_map().unwrap(); @@ -1300,8 +1316,8 @@ fn main() { info!("Add repaired disks completed"); } }; - if daemon { - while now.elapsed() < dur { + while now.elapsed() < dur { + if daemon { for signal in signals.pending() { match signal as c_int { signal_hook::SIGHUP => { @@ -1346,9 +1362,17 @@ fn main() { _ => unreachable!(), } } + } else { + break 'outer; } - } else { - break 'outer; + send_and_recieve( + &s, + &mut message_map, + &mut message_queue, + &db_pool, + &config, + client_id.clone(), + ).unwrap(); } } debug!("Bynar exited successfully"); From 85d3d618b67b47ec1b1d4ab8b1719158d9b9ba3e Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 10 Feb 2020 10:22:31 -0500 Subject: [PATCH 29/76] Add functions to create a req_map for disk-manager + check if ops are already running --- src/disk_manager.rs | 50 ++++++++++++++++++++++++++++++++++++++++++++- src/main.rs | 11 ++++++---- 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 027fd65..f9ac358 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -1,9 +1,10 @@ use serde_derive::*; +use std::collections::HashMap; use std::fs; use std::fs::{create_dir, read_to_string, File}; use std::io::{Error, ErrorKind, Write}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::process; use std::process::Command; use std::str::FromStr; @@ -39,6 +40,50 @@ use simplelog::{CombinedLogger, Config, SharedLogger, TermLogger, WriteLogger}; use slack_hook::{PayloadBuilder, Slack}; use zmq::Socket; +// Create the request map for the disk-manager +fn create_req_map() -> BynarResult>> { + // List out currently mounted block_devices + let mut devices: Vec = block_utils::get_block_devices()? + .into_iter() + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("sr") + } else { + true + }) + }) + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") + } else { + true + }) + }) + .collect(); + let mut partitions = block_utils::get_block_partitions()?; + devices.append(&mut partitions); + // add all devices to the HashMap (initialize the Hashmap) + let mut req_map: HashMap> = HashMap::new(); + devices.iter().for_each(|device| { + req_map.insert(device.to_path_buf(), None); + }); + Ok(req_map) +} + +// check if a disk already has a request. 
Return true if an op is already running (false otherwise or if +// op is List or GetCreatedTickets) +fn is_op_running(req_map: &mut HashMap>, op: Operation) -> bool { + // if op_type is List or GetCreatedTickets, return false + match op.get_Op_type() { + Op::List | Op::GetCreatedTickets => false, + _ => { + let disk = op.get_disk(); + let path = PathBuf::from(disk); + req_map.get(&path).is_some() + } + } +} + // send a notification to slack channel (if config has webhook) fn notify_slack(config: &DiskManagerConfig, msg: &str) -> BynarResult<()> { let c = config.clone(); @@ -207,6 +252,9 @@ fn listen( let (send_disk, recv_disk) = crossbeam_channel::unbounded::<(Vec, Disks)>(); let (send_ticket, recv_ticket) = crossbeam_channel::unbounded::<(Vec, OpJiraTicketsResult)>(); + + debug!("Create request map"); + let mut req_map = create_req_map()?; pool.scope(|s| 'outer: loop { if let Ok(responder) = responder.try_lock() { let now = Instant::now(); diff --git a/src/main.rs b/src/main.rs index f9cea27..40e1728 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1179,7 +1179,7 @@ fn main() { } } } - + let signals = Signals::new(&[ signal_hook::SIGHUP, signal_hook::SIGTERM, @@ -1253,9 +1253,11 @@ fn main() { } }; let public_key = get_public_key(&config, &host_info).unwrap(); - let s = match helpers::connect(&config.manager_host, + let s = match helpers::connect( + &config.manager_host, &config.manager_port.to_string(), - &public_key,) { + &public_key, + ) { Ok(s) => s, Err(e) => { error!("Error connecting to socket: {:?}", e); @@ -1372,7 +1374,8 @@ fn main() { &db_pool, &config, client_id.clone(), - ).unwrap(); + ) + .unwrap(); } } debug!("Bynar exited successfully"); From 96ce4b0616698f7dbee7cc5482608ac6c6b3d840 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 10 Feb 2020 12:57:06 -0500 Subject: [PATCH 30/76] Added request map to disk-manager, skip repeat requests --- src/disk_manager.rs | 74 ++++++++++++++++++++++++++++++++++++++++++--- src/lib/lib.rs | 1 - 2 files changed, 70 insertions(+), 5 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index f9ac358..2785f53 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -70,18 +70,65 @@ fn create_req_map() -> BynarResult>> { Ok(req_map) } +#[macro_export] +/// Get the disk path of an Operation/OpOutcomeResult (any protobuf message with a get_disk() where disk is the diskpath) as a PathBuf +macro_rules! get_op_pathbuf { + ($op:expr) => {{ + let disk = $op.get_disk(); + PathBuf::from(disk) + }}; +} + // check if a disk already has a request. Return true if an op is already running (false otherwise or if // op is List or GetCreatedTickets) -fn is_op_running(req_map: &mut HashMap>, op: Operation) -> bool { +fn is_op_running(req_map: &mut HashMap>, op: &Operation) -> bool { // if op_type is List or GetCreatedTickets, return false match op.get_Op_type() { Op::List | Op::GetCreatedTickets => false, + _ => req_map.get(&get_op_pathbuf!(op)).is_some(), + } +} + +macro_rules! op_running { + ($req_map:expr,$op:expr) => {{ + match $op.get_Op_type() { + Op::List | Op::GetCreatedTickets => false, + _ => { + if let Some(o) = $req_map.get(&get_op_pathbuf!($op)) { + o.is_some() + } else { + false + } + } + } + }}; + ($req_map:expr,$op:expr,$is_result:expr) => {{ + match $op.get_op_type() { + Op::List | Op::GetCreatedTickets => $is_result, //handle List or GetCreatedTickets... 
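// Illustrative sketch (std only) of the device filter used when seeding the
// request map: CD-ROM (sr*) and loop devices are skipped, as is anything
// without a readable file name.
use std::path::Path;

fn keep_device(dev: &Path) -> bool {
    match dev.file_name().and_then(|n| n.to_str()) {
        Some(name) => !(name.starts_with("sr") || name.starts_with("loop")),
        None => false,
    }
}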
+ _ => { + if let Some(o) = $req_map.get(&get_op_pathbuf!($op)) { + o.is_some() + } else { + false + } + } + } + }}; +} + +// Note: if the operation is List or GetCreatedTickets, skip adding it to the map +// REQUIRES: assert!(!is_op_running(req_map, op)) +// ENSURES: assert!(is_op_running(req_map, op)) (if op_type != List || GetCreatedTickets) +fn op_insert(req_map: &mut HashMap>, op: &Operation) { + assert!(!op_running!(req_map, op)); + // if op_type is List or GetCreatedTickets, skip + match op.get_Op_type() { + Op::List | Op::GetCreatedTickets => return, _ => { - let disk = op.get_disk(); - let path = PathBuf::from(disk); - req_map.get(&path).is_some() + req_map.insert(get_op_pathbuf!(op), Some(op.get_Op_type())); //no getting around the clone here unfortunately... } } + assert!(op_running!(req_map, op)); } // send a notification to slack channel (if config has webhook) @@ -299,6 +346,19 @@ fn listen( if op_no_disk(&responder, &operation) { continue; } + // check if op is currently running. If so, skip it + if op_running!(&mut req_map, &operation) { + trace!("Operation {:?} cannot be run, disk is already running an operation", operation); + //build OpOutcomeResult with SkipRepeat, send to output? + let mut op_res = OpOutcomeResult::new(); + op_res.set_disk(operation.get_disk().to_string()); + op_res.set_outcome(OpOutcome::SkipRepeat); + op_res.set_op_type(operation.get_Op_type()); + op_res.set_result(ResultType::OK); + send_res.send((client_id, op_res)); + continue; + } + op_insert(&mut req_map, &operation); match operation.get_Op_type() { Op::Add => { let id = if operation.has_osd_id() { @@ -457,6 +517,12 @@ fn listen( // no disks in the queue, check if any add/remove/safe-to-remove req results if let Ok((client_id, result)) = recv_res.try_recv() { // send result back to client + //check if result is SkipRepeat, if so, skipp the assert! 
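// Illustrative sketch (std only): the request map tracks at most one op per
// device path; List and GetCreatedTickets are never gated or recorded. `Op`
// stands in for the protobuf op type.
use std::collections::HashMap;
use std::path::PathBuf;

#[derive(Clone, Copy, PartialEq)]
enum Op {
    Add,
    Remove,
    SafeToRemove,
    List,
    GetCreatedTickets,
}

fn try_start(req_map: &mut HashMap<PathBuf, Option<Op>>, disk: PathBuf, op: Op) -> bool {
    if op == Op::List || op == Op::GetCreatedTickets {
        return true; // read-only requests always proceed
    }
    match req_map.get(&disk) {
        Some(Some(_)) => false, // an op is already running on this disk
        _ => {
            req_map.insert(disk, Some(op));
            true
        }
    }
}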
and insert + debug!("Send {:?}", result); + if OpOutcome::SkipRepeat != result.get_outcome() { + assert!(op_running!(req_map, &result, true)); + req_map.insert(get_op_pathbuf!(&result), None); // set entry in req_map to None + } let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 148f3c6..cb4f8a1 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -44,7 +44,6 @@ pub fn connect(host: &str, port: &str, server_publickey: &[u8]) -> BynarResult Date: Mon, 10 Feb 2020 15:40:46 -0500 Subject: [PATCH 31/76] Add behavior to output correct message if skipping an operation because the disk-manager is already running an op on specified disk --- src/client.rs | 28 ++++++++++++++++++++-------- src/disk_manager.rs | 1 + src/main.rs | 43 ++++++++++++++++++++++++++++++++++--------- 3 files changed, 55 insertions(+), 17 deletions(-) diff --git a/src/client.rs b/src/client.rs index a11d4fa..f618899 100644 --- a/src/client.rs +++ b/src/client.rs @@ -28,7 +28,7 @@ fn add_disk( id: Option, client_id: Vec, simulate: bool, -) -> BynarResult { +) -> BynarResult { let mut sent = false; //loop until socket is readable, then get the response loop { @@ -43,7 +43,7 @@ fn add_disk( let message = helpers::get_messages(s)?; if !message.is_empty() { let op_result = get_message!(OpOutcomeResult, &message)?; - get_op_result!(op_result, add_disk); + return Ok(op_result); } } } @@ -81,7 +81,7 @@ fn remove_disk( id: Option, client_id: Vec, simulate: bool, -) -> BynarResult { +) -> BynarResult { let mut sent = false; //loop until socket is readable, then get the response @@ -97,7 +97,7 @@ fn remove_disk( let message = helpers::get_messages(s)?; if !message.is_empty() { let op_result = get_message!(OpOutcomeResult, &message)?; - get_op_result!(op_result, remove_disk); + return Ok(op_result); } } } @@ -115,10 +115,16 @@ fn handle_add_disk(s: &Socket, matches: &ArgMatches<'_>, client_id: Vec) { None => false, }; match add_disk(s, &p, id, client_id, simulate) { - Ok(outcome) => match outcome { + Ok(outcome) => match outcome.get_outcome() { OpOutcome::Success => println!("Adding disk successful"), OpOutcome::Skipped => println!("Disk cannot be added, Skipping"), - OpOutcome::SkipRepeat => println!("Disk already added, Skipping"), + OpOutcome::SkipRepeat => { + if outcome.has_value() { + println!("Disk has an operation ongoing, Skipping") + } else { + println!("Disk already added, Skipping") + } + } }, Err(e) => { println!("Adding disk failed: {}", e); @@ -197,10 +203,16 @@ fn handle_remove_disk(s: &Socket, matches: &ArgMatches<'_>, client_id: Vec) None => false, }; match remove_disk(s, &p, id, client_id, simulate) { - Ok(outcome) => match outcome { + Ok(outcome) => match outcome.get_outcome() { OpOutcome::Success => println!("Removing disk successful"), OpOutcome::Skipped => println!("Disk cannot be removed. Skipping"), - OpOutcome::SkipRepeat => println!("Disk already removed. 
Skipping"), + OpOutcome::SkipRepeat => { + if outcome.has_value() { + println!("Disk has an operation ongoing, Skipping") + } else { + println!("Disk already removed, Skipping") + } + } }, Err(e) => { println!("Removing disk failed: {}", e); diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 2785f53..4e94047 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -355,6 +355,7 @@ fn listen( op_res.set_outcome(OpOutcome::SkipRepeat); op_res.set_op_type(operation.get_Op_type()); op_res.set_result(ResultType::OK); + op_res.set_value(false); send_res.send((client_id, op_res)); continue; } diff --git a/src/main.rs b/src/main.rs index 40e1728..481daf8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -809,13 +809,24 @@ fn send_and_update( } // handle the return value from an add_disk request -fn handle_add_disk_res(pool: &Pool, outcome: OpOutcome, ticket_id: String) { - match outcome { +fn handle_add_disk_res( + pool: &Pool, + outcome: &OpOutcomeResult, + ticket_id: String, +) { + match outcome.get_outcome() { OpOutcome::Success => debug!("Disk added successfully. Updating database record"), // Disk was either boot or something that shouldn't be added via backend OpOutcome::Skipped => debug!("Disk Skipped. Updating database record"), // Disk is already in the cluster - OpOutcome::SkipRepeat => debug!("Disk already added. Skipping. Updating database record"), + OpOutcome::SkipRepeat => { + if !outcome.has_value() { + debug!("Disk already added. Skipping. Updating database record") + } else { + debug!("Disk already undergoing an operation. Skipping. Do not update database record"); + return; + } + } } match in_progress::resolve_ticket_in_db(pool, &ticket_id) { Ok(_) => debug!("Database updated"), @@ -859,7 +870,7 @@ fn handle_operation_result( let path = Path::new(op_res.get_disk()); if let Some(disk_op) = get_map_op(message_map, &path.to_path_buf())? 
{ if let Some(ticket_id) = disk_op.description { - handle_add_disk_res(pool, op_res.get_outcome(), ticket_id); + handle_add_disk_res(pool, &op_res, ticket_id); //update result in the map (in otherwords, just set it to None) remove_map_op(message_map, &path.to_path_buf())?; } @@ -906,11 +917,25 @@ fn handle_operation_result( ); } OpOutcome::SkipRepeat => { - debug!("Disk {} already removed, skipping.", dev_path.display()); - let _ = notify_slack( - config, - &format!("Disk {} already removed, skipping.", dev_path.display()), - ); + if op_res.has_value() { + debug!( + "Disk {} currently undergoing another operation, skipping", + dev_path.display() + ); + let _ = notify_slack( + config, + &format!( + "Disk {} currently undergoing another operation, skipping", + dev_path.display() + ), + ); + } else { + debug!("Disk {} already removed, skipping.", dev_path.display()); + let _ = notify_slack( + config, + &format!("Disk {} already removed, skipping.", dev_path.display()), + ); + } } } //update map From 6737dde038f24bf538e23cba6cb01560f243390c Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 12 Feb 2020 11:18:16 -0500 Subject: [PATCH 32/76] fix the return values to not error incorrectly --- src/main.rs | 166 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 132 insertions(+), 34 deletions(-) diff --git a/src/main.rs b/src/main.rs index 481daf8..c65adc1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -149,29 +149,68 @@ fn add_or_update_map_op( message_map.insert(parent.to_path_buf(), disk_map); } } else { - //not partition - //parent is in the map - if let Some(disk) = message_map.get_mut(dev_path) { - if let Some(partition) = disk.clone().get(dev_path) { - // partition in map + //not partition or partition destroyed + if dev_path.exists() { + //parent is in the map + if let Some(disk) = message_map.get_mut(dev_path) { + if let Some(partition) = disk.clone().get(dev_path) { + // partition in map + disk.insert(dev_path.to_path_buf(), Some(op)); + return Ok(partition.clone()); + } disk.insert(dev_path.to_path_buf(), Some(op)); - return Ok(partition.clone()); + } else { + //add to map + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(dev_path.to_path_buf(), Some(op)); + let partitions = block_utils::get_block_partitions()?; + // check if partition parent is device + for partition in &partitions { + if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { + if &disk == dev_path { + disk_map.insert(partition.to_path_buf(), None); + } + } + } + message_map.insert(dev_path.to_path_buf(), disk_map); } - disk.insert(dev_path.to_path_buf(), Some(op)); } else { - //add to map - let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(dev_path.to_path_buf(), Some(op)); - let partitions = block_utils::get_block_partitions()?; - // check if partition parent is device - for partition in &partitions { - if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { - if &disk == dev_path { - disk_map.insert(partition.to_path_buf(), None); + // partition was destroyed...probably + // make parent path + let path = dev_path.to_string_lossy(); + let path = &path[0..path.len() - 1]; + let path = PathBuf::from(path.to_string()); + if path.exists() { + //then make new entry to insert... + if let Some(disk) = message_map.get_mut(&path) { + // we know the partition isn't in the map already... 
+ disk.insert(dev_path.to_path_buf(), Some(op)); + } else { + //add to map + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(path.to_path_buf(), Some(op)); + let partitions = block_utils::get_block_partitions()?; + // check if partition parent is device + for partition in &partitions { + if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { + if disk == path { + disk_map.insert(partition.to_path_buf(), None); + } + } } + message_map.insert(path.to_path_buf(), disk_map); } + } else { + // path just doesn't exist, so error... + error!( + "Path {} does not exist, nor does its parent.", + dev_path.display() + ); + return Err(BynarError::from(format!( + "Path {} does not exist, nor does its parent.", + dev_path.display() + ))); } - message_map.insert(dev_path.to_path_buf(), disk_map); } } Ok(None) @@ -191,12 +230,26 @@ fn get_map_op( } } } else { - //not partition - //parent is in the map - if let Some(disk) = message_map.get(dev_path) { - if let Some(partition) = disk.get(dev_path) { - // partition in map - return Ok(partition.clone()); + if dev_path.exists() { + //not partition + //parent is in the map + if let Some(disk) = message_map.get(dev_path) { + if let Some(partition) = disk.get(dev_path) { + // partition in map + return Ok(partition.clone()); + } + } + } else { + // partition was destroyed...probably + // make parent path + let path = dev_path.to_string_lossy(); + let path = &path[0..path.len() - 1]; + let path = PathBuf::from(path.to_string()); + if let Some(disk) = message_map.get(&path) { + if let Some(partition) = disk.get(dev_path) { + // partition in map + return Ok(partition.clone()); + } } } } @@ -220,13 +273,28 @@ fn remove_map_op( } } } else { - //not partition - //parent is in the map - if let Some(disk) = message_map.get_mut(dev_path) { - if let Some(partition) = disk.clone().get(dev_path) { - // partition in map - disk.insert(dev_path.to_path_buf(), None); - return Ok(partition.clone()); + if dev_path.exists() { + //not partition + //parent is in the map + if let Some(disk) = message_map.get_mut(dev_path) { + if let Some(partition) = disk.clone().get(dev_path) { + // partition in map + disk.insert(dev_path.to_path_buf(), None); + return Ok(partition.clone()); + } + } + } else { + // partition was destroyed...probably + // make parent path + let path = dev_path.to_string_lossy(); + let path = &path[0..path.len() - 1]; + let path = PathBuf::from(path.to_string()); + if let Some(disk) = message_map.get_mut(&path) { + if let Some(partition) = disk.clone().get(dev_path) { + // partition in map + disk.insert(dev_path.to_path_buf(), None); + return Ok(partition.clone()); + } } } } @@ -238,7 +306,7 @@ fn remove_map_op( // get the hashmap associated with a diskpath from the op map fn get_disk_map_op( - message_map: &HashMap>>, + message_map: &mut HashMap>>, dev_path: &PathBuf, ) -> BynarResult>> { if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? 
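// Illustrative sketch (std only): when a partition path no longer exists (it
// was wiped along with the disk), the parent path is derived by dropping the
// trailing partition-number character, e.g. /dev/sdc1 -> /dev/sdc. Assumes an
// ASCII, single-digit suffix; the real code first asks block_utils for the
// parent.
use std::path::{Path, PathBuf};

fn assumed_parent(dev_path: &Path) -> PathBuf {
    let s = dev_path.to_string_lossy().to_string();
    PathBuf::from(&s[..s.len() - 1])
}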
{ @@ -247,10 +315,20 @@ fn get_disk_map_op( return Ok(disk.clone()); } } else { - //not partition //parent is in the map - if let Some(disk) = message_map.get(dev_path) { - return Ok(disk.clone()); + if dev_path.exists() { + if let Some(disk) = message_map.get(dev_path) { + return Ok(disk.clone()); + } + } else { + // partition was destroyed...probably + // make parent path + let path = dev_path.to_string_lossy(); + let path = &path[0..path.len() - 1]; + let path = PathBuf::from(path.to_string()); + if let Some(disk) = message_map.get(&path) { + return Ok(disk.clone()); + } } } Err(BynarError::from(format!( @@ -404,6 +482,15 @@ fn check_for_failed_disks( // add the partition state machines? to the replacing list let mut add_replacing = Vec::new(); for state_machine in &replacing { + if !state_machine.block_device.dev_path.exists() { + //partition was deleted + //add partition to the map + add_or_update_map_op( + message_map, + &state_machine.block_device.dev_path, + DiskOp::new(Operation::new(), None, None), + )?; + } let disks = get_disk_map_op(message_map, &state_machine.block_device.dev_path)?; // uh, get list of keys in disks and filter usable list for keypath? let mut add: Vec<_> = usable_states @@ -873,6 +960,7 @@ fn handle_operation_result( handle_add_disk_res(pool, &op_res, ticket_id); //update result in the map (in otherwords, just set it to None) remove_map_op(message_map, &path.to_path_buf())?; + return Ok(()); } } error!( @@ -891,6 +979,7 @@ fn handle_operation_result( current_op.ret_val = Some(op_res); //push op back into map add_or_update_map_op(message_map, &dev_path, current_op)?; + return Ok(()); } //otherwise error.... return Err(BynarError::from(format!( @@ -997,6 +1086,7 @@ fn handle_operation_result( OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); add_or_update_operation_detail(pool, &mut operation_detail)?; + return Ok(()); } return Err(BynarError::from(format!( "Disk {} is missing the current operation", @@ -1043,6 +1133,7 @@ fn send_and_recieve( if val.get_outcome() == OpOutcome::Success && val.get_value() { //then ok to run Op::Remove send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; + trace!("Updated map {:?}", message_map); } // safe-to-remove returned false or error'd so we should not remove but let manual handling // delete the remove request in this case (in otherwords, do nothing) @@ -1054,6 +1145,7 @@ fn send_and_recieve( disk_op.op_type ); send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; + trace!("Updated map {:?}", message_map); } } else { // we haven't gotten response from previous request yet, push request to back of queue @@ -1064,6 +1156,7 @@ fn send_and_recieve( // have a safe-to-remove run before (it's always safe-to-remove then remove) // however since the remove operation will run safe-to-remove anyways, it's fine to just run send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; + trace!("Updated map {:?}", message_map); } } } @@ -1078,6 +1171,7 @@ fn send_and_recieve( match helpers::get_first_op_result(&mut message) { Some(outcome) => { //message.drain(0..outcome.write_to_bytes()?.len()); + trace!("Sent map {:?}", message_map); handle_operation_result(message_map, pool, outcome, config)?; } None => { @@ -1343,6 +1437,8 @@ fn main() { info!("Add repaired disks completed"); } }; + debug!("Current Request Map {:?}", message_map); + debug!("Current Message Queue {:?}", message_queue); while now.elapsed() < dur { if daemon { 
for signal in signals.pending() { @@ -1402,6 +1498,8 @@ fn main() { ) .unwrap(); } + debug!("Request Map after looping {:?}", message_map); + debug!("Message Queue after looping {:?}", message_queue); } debug!("Bynar exited successfully"); notify_slack( From 7c5f52a1e189fd9884a414f7fed2d6c2505409de Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 12 Feb 2020 11:30:10 -0500 Subject: [PATCH 33/76] Fix Error handling for bynar-client' --- src/client.rs | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index f618899..0e73283 100644 --- a/src/client.rs +++ b/src/client.rs @@ -43,7 +43,20 @@ fn add_disk( let message = helpers::get_messages(s)?; if !message.is_empty() { let op_result = get_message!(OpOutcomeResult, &message)?; - return Ok(op_result); + match op_result.get_result() { + ResultType::OK => { + return Ok(op_result); + } + ResultType::ERR => { + if op_result.has_error_msg() { + let msg = op_result.get_error_msg(); + return Err(BynarError::from(op_result.get_error_msg())); + } else { + error!("error_msg not set"); + return Err(BynarError::from("error_msg not set")); + } + } + } } } } @@ -97,7 +110,20 @@ fn remove_disk( let message = helpers::get_messages(s)?; if !message.is_empty() { let op_result = get_message!(OpOutcomeResult, &message)?; - return Ok(op_result); + match op_result.get_result() { + ResultType::OK => { + return Ok(op_result); + } + ResultType::ERR => { + if op_result.has_error_msg() { + let msg = op_result.get_error_msg(); + return Err(BynarError::from(op_result.get_error_msg())); + } else { + error!("error_msg not set"); + return Err(BynarError::from("error_msg not set")); + } + } + } } } } From b2a9a96802458e0fd7e48f1b71070a165b31babe Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 12 Feb 2020 11:38:54 -0500 Subject: [PATCH 34/76] added checks before removing partitions/unmounting partitions --- src/backend/ceph.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index dff349e..76663bb 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -874,10 +874,18 @@ impl CephBackend { let mut part1: String = dev_path.to_string_lossy().to_string(); part1.push_str("1"); let part1 = Path::new(&part1); - block_utils::unmount_device(&part1)?; + let osd_dir = Path::new("/var/lib/ceph/osd/").join(&format!("ceph-{}", osd_id)); + // check if the device path exists (partition may have been deleted) + if part1.exists() { + // check if the osd_dir is mounted (might not be if partitions have been deleted) + if block_utils::is_mounted(&osd_dir)? 
{ + // unmount the partition + debug!("Unmount {}", part1.display()); + block_utils::unmount_device(&part1)?; + } + } // remove the osd directory - let osd_dir = Path::new("/var/lib/ceph/osd/").join(&format!("ceph-{}", osd_id)); if osd_dir.exists() { debug!("Cleaning up /var/lib/ceph/osd/ceph-{}", osd_id); match remove_dir_all(osd_dir) { From 1d332c5464fea0aff79889f3f28e0dce10495198 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 12 Feb 2020 14:33:24 -0500 Subject: [PATCH 35/76] SafeToRemove returning error is expected, disk path might not be removable --- src/main.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index c65adc1..f0a36ef 100644 --- a/src/main.rs +++ b/src/main.rs @@ -944,7 +944,8 @@ fn handle_operation_result( } Op::SafeToRemove => { error!("SafeToRemove disk failed : {}", msg); - return Err(BynarError::from(msg)); + // no need to error out, but update the map. Error outcomes are expected for SafeToRemove. + // Ex. you removed a disk first before the partition. } _ => {} } @@ -1142,7 +1143,7 @@ fn send_and_recieve( // this technically shouldn't happen though, so print an error! error!( "Previous request {:?} has finished, but hasn't been reset", - disk_op.op_type + disk_op ); send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; trace!("Updated map {:?}", message_map); From 7f6efe2dd271d15d851302b1cbe9163712c92fb5 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 17 Feb 2020 09:15:05 -0500 Subject: [PATCH 36/76] Handle looping and fix error out, Change handle all_finished to check for a safely completed remove operation. If none do usual, otherwise use the safe one, if all safe-to-remove finished (as unsuccessful) return, figure out how to only run safe-to-remove/remove once, handle errors properly --- src/main.rs | 235 +++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 204 insertions(+), 31 deletions(-) diff --git a/src/main.rs b/src/main.rs index f0a36ef..db8e524 100644 --- a/src/main.rs +++ b/src/main.rs @@ -75,9 +75,12 @@ impl DiskOp { } // create a message map to handle list of disk-manager requests -fn create_msg_map() -> BynarResult>>> { +fn create_msg_map( + pool: &Pool, + host_mapping: &HostDetailsMapping, +) -> BynarResult>>> { // List out currently mounted block_devices - let devices: Vec = block_utils::get_block_devices()? + let mut devices: Vec = block_utils::get_block_devices()? .into_iter() .filter(|b| { !(if let Some(p) = b.as_path().file_name() { @@ -94,8 +97,29 @@ fn create_msg_map() -> BynarResult = + in_progress::get_devices_from_db(pool, host_mapping.storage_detail_id)? + .into_iter() + .map(|(id, name, path)| path) + .collect(); let mut map: HashMap>> = HashMap::new(); - let partitions = block_utils::get_block_partitions()?; + + let partitions: Vec = db_devices + .clone() + .into_iter() + .filter(|p| match block_utils::is_disk(p) { + Err(_) => p.to_string_lossy().chars().last().unwrap().is_digit(10), + Ok(b) => b, + }) + .collect(); + let mut disks: Vec = db_devices + .into_iter() + .filter(|p| match block_utils::is_disk(p) { + Err(_) => !p.to_string_lossy().chars().last().unwrap().is_digit(10), + Ok(b) => b, + }) + .collect(); + devices.append(&mut disks); // for each block device add its partitions to the HashMap // add them to HashMap devices.iter().for_each(|device| { @@ -183,12 +207,17 @@ fn add_or_update_map_op( if path.exists() { //then make new entry to insert... 
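// Illustrative sketch of the guard added to the ceph backend above: the
// unmount is only attempted when the partition path still exists and the OSD
// directory is actually mounted. `is_mounted` and `unmount` stand in for the
// block_utils calls.
use std::io;
use std::path::Path;

fn maybe_unmount(
    part1: &Path,
    osd_dir: &Path,
    is_mounted: impl Fn(&Path) -> io::Result<bool>,
    unmount: impl Fn(&Path) -> io::Result<()>,
) -> io::Result<()> {
    if part1.exists() && is_mounted(osd_dir)? {
        unmount(part1)?;
    }
    Ok(())
}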
if let Some(disk) = message_map.get_mut(&path) { - // we know the partition isn't in the map already... - disk.insert(dev_path.to_path_buf(), Some(op)); + // check if the partition is in the map + if let Some(partition) = disk.clone().get(dev_path) { + // partition in map + disk.insert(dev_path.to_path_buf(), Some(op)); + return Ok(partition.clone()); + } + disk.insert(dev_path.to_path_buf(), None); } else { //add to map let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(path.to_path_buf(), Some(op)); + disk_map.insert(path.to_path_buf(), None); let partitions = block_utils::get_block_partitions()?; // check if partition parent is device for partition in &partitions { @@ -495,7 +524,31 @@ fn check_for_failed_disks( // uh, get list of keys in disks and filter usable list for keypath? let mut add: Vec<_> = usable_states .iter() - .filter(|state_machine| disks.contains_key(&state_machine.block_device.dev_path)) + .filter(|state_machine| { + if disks.contains_key(&state_machine.block_device.dev_path) { + //check hashmap of the device path == None, or OpType != SafeToRemove || Remove + match get_map_op(&message_map, &state_machine.block_device.dev_path).unwrap() { + Some(op) => { + // check if in_progress + info!("Connecting to database to check if disk is in progress"); + let in_progress = in_progress::is_hardware_waiting_repair( + pool, + host_mapping.storage_detail_id, + &state_machine.block_device.dev_path.to_string_lossy(), + None, + ) + .unwrap(); + //check if op_type == SafeToRemove || Remove + !(op.op_type == Op::SafeToRemove + || op.op_type == Op::Remove + || in_progress) + } + None => true, + } + } else { + false + } + }) .collect(); add_replacing.append(&mut add); } @@ -515,7 +568,7 @@ fn check_for_failed_disks( .collect(); replacing.iter().for_each(|state_machine| { - //add safeToRemove + Remove request to message_queue, checking if its already in first + // add safeToRemove + Remove request to message_queue, checking if its already in first // create Operation, description, and get the op_id let mut desc = description.clone(); add_disk_to_description( @@ -924,6 +977,7 @@ fn handle_add_disk_res( //handle return of Operation fn handle_operation_result( message_map: &mut HashMap>>, + host_info: &Host, pool: &Pool, op_res: OpOutcomeResult, config: &ConfigSettings, @@ -940,11 +994,14 @@ fn handle_operation_result( } Op::Remove => { error!("Remove disk failed : {}", msg); - return Err(BynarError::from(msg)); + // return Err(BynarError::from(msg)); + // no need to error out, but update the map. Error outcomes are also expected for Remove, + // since remove might be run on the disk and the partition...or the input path is not in the + // config file } Op::SafeToRemove => { error!("SafeToRemove disk failed : {}", msg); - // no need to error out, but update the map. Error outcomes are expected for SafeToRemove. + // no need to error out, but update the map. Error outcomes are expected for SafeToRemove. // Ex. you removed a disk first before the partition. 
} _ => {} @@ -982,10 +1039,84 @@ fn handle_operation_result( add_or_update_map_op(message_map, &dev_path, current_op)?; return Ok(()); } + // check if allll the other paths in disk are SafeToRemove (and not Success) + // check if all ops in the disk have finished + let disk = get_disk_map_op(message_map, &dev_path)?; + let mut all_finished = true; + disk.iter().for_each(|(k, v)| { + //check if value finished + if let Some(val) = v { + if let Some(ret) = &val.ret_val { + if ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove + || ret.get_op_type() == Op::Remove) + { + all_finished = false; + } + } + } + }); + // if so, notify slack + if all_finished { + debug!("safe to remove: false"); + let _ = notify_slack( + config, + &format!( + "Need to remove disk {} but it's not safe \ + on host: {}. I need a human. Filing a ticket", + dev_path.display(), + host_info.hostname, + ), + ); + // get the path of the disk + let path = + if let Some(parent) = block_utils::get_parent_devpath_from_path(&dev_path)? { + parent + } else { + dev_path + }; + // get the current op associated with the disk + if let Some(current_op) = get_map_op(message_map, &path)? { + let description = match current_op.description { + Some(d) => d, + None => { + return Err(BynarError::from(format!( + "Disk {} on host {} is missing a description", + path.display(), + host_info.hostname + ))) + } + }; + let op_id = match current_op.operation_id { + None => { + error!("Operation not recorded for {}", path.display()); + 0 + } + Some(i) => i, + }; + //open JIRA ticket+ notify slack + debug!("Creating support ticket"); + let ticket_id = + create_support_ticket(config, "Bynar: Dead disk", &description)?; + debug!("Recording ticket id {} in database", ticket_id); + // update operation detials in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + return Ok(()); + } + return Err(BynarError::from(format!( + "Disk {} on host {} is missing the current operation", + path.display(), + host_info.hostname + ))); + } //otherwise error.... 
return Err(BynarError::from(format!( - "{} does not have a currently running operation!", - dev_path.display() + "{} on host {} does not have a currently running operation!", + dev_path.display(), + host_info.hostname ))); } Op::Remove => { @@ -993,37 +1124,62 @@ fn handle_operation_result( let dev_path = PathBuf::from(op_res.get_disk()); match op_res.get_outcome() { OpOutcome::Success => { - debug!("Disk {} removal successful", dev_path.display()); + debug!( + "Disk {} on host {} removal successful", + dev_path.display(), + host_info.hostname + ); let _ = notify_slack( config, - &format!("Disk {} removal successful", dev_path.display()), + &format!( + "Disk {} on host {} removal successful", + dev_path.display(), + host_info.hostname + ), ); } OpOutcome::Skipped => { - debug!("Disk {} skipped, disk is not removable", dev_path.display()); + debug!( + "Disk {} on host {} skipped, disk is not removable", + dev_path.display(), + host_info.hostname + ); let _ = notify_slack( config, - &format!("Disk {} skipped, disk is not removable", dev_path.display()), + &format!( + "Disk {} on host {} skipped, disk is not removable", + dev_path.display(), + host_info.hostname + ), ); } OpOutcome::SkipRepeat => { if op_res.has_value() { debug!( - "Disk {} currently undergoing another operation, skipping", - dev_path.display() + "Disk {} on host {} currently undergoing another operation, skipping", + dev_path.display(), + host_info.hostname ); let _ = notify_slack( config, &format!( - "Disk {} currently undergoing another operation, skipping", - dev_path.display() + "Disk {} on host {} currently undergoing another operation, skipping", + dev_path.display(), host_info.hostname ), ); } else { - debug!("Disk {} already removed, skipping.", dev_path.display()); + debug!( + "Disk {} on host {} already removed, skipping.", + dev_path.display(), + host_info.hostname + ); let _ = notify_slack( config, - &format!("Disk {} already removed, skipping.", dev_path.display()), + &format!( + "Disk {} on host {} already removed, skipping.", + dev_path.display(), + host_info.hostname + ), ); } } @@ -1035,8 +1191,9 @@ fn handle_operation_result( add_or_update_map_op(message_map, &dev_path, current_op)?; } else { return Err(BynarError::from(format!( - "{} does not have a currently running operation!", - dev_path.display() + "{} on host {} does not have a currently running operation!", + dev_path.display(), + host_info.hostname ))); } // check if all ops in the disk have finished @@ -1052,6 +1209,14 @@ fn handle_operation_result( }); //if all finished open ticket+ notify slack if all_finished { + let _ = notify_slack( + &config, + &format!( + "Filing a ticket for Host: {}. Drive {} needs removal", + host_info.hostname, + dev_path.display(), + ), + ); // get the path of the disk let path = if let Some(parent) = block_utils::get_parent_devpath_from_path(&dev_path)? { @@ -1111,6 +1276,7 @@ fn send_and_recieve( s: &Socket, message_map: &mut HashMap>>, message_queue: &mut VecDeque<(Operation, Option, Option)>, + host_info: &Host, pool: &Pool, config: &ConfigSettings, client_id: Vec, @@ -1143,13 +1309,14 @@ fn send_and_recieve( // this technically shouldn't happen though, so print an error! 
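                    // A completed DiskOp should have been handled and cleared before a new
                    // request for the same path is sent; if one is still sitting in the map,
                    // log it and let send_and_update below overwrite the stale entry with
                    // the new request.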
error!( "Previous request {:?} has finished, but hasn't been reset", - disk_op + disk_op.op_type ); send_and_update(s, message_map, client_id, (mess, desc, op_id), &path)?; trace!("Updated map {:?}", message_map); } } else { // we haven't gotten response from previous request yet, push request to back of queue + trace!("Have not gotten response yet, push back request {:?}", mess); message_queue.push_back((mess, desc, op_id)); } } else { @@ -1173,7 +1340,7 @@ fn send_and_recieve( Some(outcome) => { //message.drain(0..outcome.write_to_bytes()?.len()); trace!("Sent map {:?}", message_map); - handle_operation_result(message_map, pool, outcome, config)?; + handle_operation_result(message_map, host_info, pool, outcome, config)?; } None => { //Actually, this is a problem since Bynar only sends Add/SafeToRemove/Remove requests @@ -1388,7 +1555,7 @@ fn main() { debug!("Client ID {:?}, len {}", client_id, client_id.len()); let dur = Duration::from_secs(time); let mut message_queue: VecDeque<(Operation, Option, Option)> = VecDeque::new(); - let mut message_map = create_msg_map().unwrap(); + let mut message_map = create_msg_map(&db_pool, &host_details_mapping).unwrap(); 'outer: loop { let now = Instant::now(); match check_for_failed_disks( @@ -1489,18 +1656,24 @@ fn main() { } else { break 'outer; } - send_and_recieve( + match send_and_recieve( &s, &mut message_map, &mut message_queue, + &host_info, &db_pool, &config, client_id.clone(), - ) - .unwrap(); + ) { + Err(e) => { + error!("Send or Receive messages faile with error: {}", e); + break 'outer; + } + _ => info!("Send and Recieve successfully ran"), + }; + debug!("Message Queue after looping {:?}", message_queue); } debug!("Request Map after looping {:?}", message_map); - debug!("Message Queue after looping {:?}", message_queue); } debug!("Bynar exited successfully"); notify_slack( From b345ff63c287f2f56329bc0e4d9de1f6e0f9d5a4 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 17 Feb 2020 11:27:12 -0500 Subject: [PATCH 37/76] Add the Successful call but SafeToRemove is false to all_finished check --- src/main.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index db8e524..780148f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1047,7 +1047,7 @@ fn handle_operation_result( //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if ret.get_outcome() != OpOutcome::Success + if (ret.get_outcome() == OpOutcome::Success && !ret.get_value()) || ret.get_outcome() != OpOutcome::Success && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove) { @@ -1096,14 +1096,29 @@ fn handle_operation_result( }; //open JIRA ticket+ notify slack debug!("Creating support ticket"); - let ticket_id = + // temporarily disable error out + match create_support_ticket(config, "Bynar: Dead disk", &description) { + Ok(ticket_id) => { + debug!("Recording ticket id {} in database", ticket_id); + // update operation detials in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + } + Err(e) => { + let _ = + notify_slack(config, &format!("Unable to create ticket {:?}", e)); + } + } + /*let ticket_id = create_support_ticket(config, "Bynar: Dead disk", &description)?; debug!("Recording ticket id {} in database", ticket_id); // update operation detials in DB let mut operation_detail = OperationDetail::new(op_id, 
OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; + add_or_update_operation_detail(pool, &mut operation_detail)?;*/ return Ok(()); } return Err(BynarError::from(format!( From 258789342570bc5d2f595f548828bcc544311299 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 18 Feb 2020 13:15:00 -0500 Subject: [PATCH 38/76] Test without ticket erroring out --- src/main.rs | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 780148f..cf3710e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1047,7 +1047,7 @@ fn handle_operation_result( //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if (ret.get_outcome() == OpOutcome::Success && !ret.get_value()) || ret.get_outcome() != OpOutcome::Success + if ret.get_outcome() != OpOutcome::Success && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove) { @@ -1259,6 +1259,22 @@ fn handle_operation_result( }; //open JIRA ticket+ notify slack debug!("Creating support ticket"); + match create_support_ticket(config, "Bynar: Dead disk", &description) { + Ok(ticket_id) => { + debug!("Recording ticket id {} in database", ticket_id); + // update operation detials in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + } + Err(e) => { + let _ = + notify_slack(config, &format!("Unable to create ticket {:?}", e)); + } + } + // temporarily disable ticket erroring out + /* let ticket_id = create_support_ticket(config, "Bynar: Dead disk", &description)?; debug!("Recording ticket id {} in database", ticket_id); @@ -1266,7 +1282,7 @@ fn handle_operation_result( let mut operation_detail = OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; + add_or_update_operation_detail(pool, &mut operation_detail)?;*/ return Ok(()); } return Err(BynarError::from(format!( @@ -1681,7 +1697,7 @@ fn main() { client_id.clone(), ) { Err(e) => { - error!("Send or Receive messages faile with error: {}", e); + error!("Send or Receive messages failed with error: {}", e); break 'outer; } _ => info!("Send and Recieve successfully ran"), From cb6459f5ccb44106d413501c258179f4d5d07433 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 18 Feb 2020 13:16:15 -0500 Subject: [PATCH 39/76] error out on ticket creation failure --- src/main.rs | 37 +++---------------------------------- 1 file changed, 3 insertions(+), 34 deletions(-) diff --git a/src/main.rs b/src/main.rs index cf3710e..e44fc4c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1096,29 +1096,14 @@ fn handle_operation_result( }; //open JIRA ticket+ notify slack debug!("Creating support ticket"); - // temporarily disable error out - match create_support_ticket(config, "Bynar: Dead disk", &description) { - Ok(ticket_id) => { - debug!("Recording ticket id {} in database", ticket_id); - // update operation detials in DB - let mut operation_detail = - OperationDetail::new(op_id, OperationType::WaitingForReplacement); - operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; - } - Err(e) => { - let _ = - notify_slack(config, &format!("Unable to create ticket {:?}", e)); - } - } - /*let ticket_id = + let 
ticket_id = create_support_ticket(config, "Bynar: Dead disk", &description)?; debug!("Recording ticket id {} in database", ticket_id); // update operation detials in DB let mut operation_detail = OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?;*/ + add_or_update_operation_detail(pool, &mut operation_detail)?; return Ok(()); } return Err(BynarError::from(format!( @@ -1259,22 +1244,6 @@ fn handle_operation_result( }; //open JIRA ticket+ notify slack debug!("Creating support ticket"); - match create_support_ticket(config, "Bynar: Dead disk", &description) { - Ok(ticket_id) => { - debug!("Recording ticket id {} in database", ticket_id); - // update operation detials in DB - let mut operation_detail = - OperationDetail::new(op_id, OperationType::WaitingForReplacement); - operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; - } - Err(e) => { - let _ = - notify_slack(config, &format!("Unable to create ticket {:?}", e)); - } - } - // temporarily disable ticket erroring out - /* let ticket_id = create_support_ticket(config, "Bynar: Dead disk", &description)?; debug!("Recording ticket id {} in database", ticket_id); @@ -1282,7 +1251,7 @@ fn handle_operation_result( let mut operation_detail = OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?;*/ + add_or_update_operation_detail(pool, &mut operation_detail)?; return Ok(()); } return Err(BynarError::from(format!( From 780fbdc6a5f41784cb2972ff353b7f5dd0d9c5b4 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 19 Feb 2020 10:10:34 -0500 Subject: [PATCH 40/76] Working on Unit tests, added condition to state machine so itonly skips disks that aren't mounted --- src/in_progress.rs | 3 +- src/main.rs | 87 +++++++++++++++++++++++++++++++++++++++++++--- src/test_disk.rs | 7 ++-- 3 files changed, 90 insertions(+), 7 deletions(-) diff --git a/src/in_progress.rs b/src/in_progress.rs index f820710..6153dc4 100644 --- a/src/in_progress.rs +++ b/src/in_progress.rs @@ -17,7 +17,7 @@ use std::time::Duration; #[cfg(test)] mod tests { use super::super::ConfigSettings; - use block_utils::{Device, FilesystemType, MediaType, ScsiInfo}; + use block_utils::{Device, DeviceType, FilesystemType, MediaType, ScsiInfo}; use simplelog::{Config, TermLogger}; use std::collections::BTreeMap; use std::path::{Path, PathBuf}; @@ -56,6 +56,7 @@ mod tests { id: Some(drive_uuid), name: dev_name, media_type: MediaType::Rotational, + device_type: DeviceType::Disk, capacity: 26214400, fs_type: FilesystemType::Xfs, serial_number: Some("123456".into()), diff --git a/src/main.rs b/src/main.rs index e44fc4c..615a17d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1096,14 +1096,29 @@ fn handle_operation_result( }; //open JIRA ticket+ notify slack debug!("Creating support ticket"); - let ticket_id = + // temporarily disable error out + match create_support_ticket(config, "Bynar: Dead disk", &description) { + Ok(ticket_id) => { + debug!("Recording ticket id {} in database", ticket_id); + // update operation detials in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + } + Err(e) => { + let _ = + notify_slack(config, &format!("Unable to 
create ticket {:?}", e)); + } + } + /*let ticket_id = create_support_ticket(config, "Bynar: Dead disk", &description)?; debug!("Recording ticket id {} in database", ticket_id); // update operation detials in DB let mut operation_detail = OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; + add_or_update_operation_detail(pool, &mut operation_detail)?;*/ return Ok(()); } return Err(BynarError::from(format!( @@ -1181,7 +1196,7 @@ fn handle_operation_result( host_info.hostname ), ); - } + } } } //update map @@ -1244,6 +1259,22 @@ fn handle_operation_result( }; //open JIRA ticket+ notify slack debug!("Creating support ticket"); + match create_support_ticket(config, "Bynar: Dead disk", &description) { + Ok(ticket_id) => { + debug!("Recording ticket id {} in database", ticket_id); + // update operation detials in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + } + Err(e) => { + let _ = + notify_slack(config, &format!("Unable to create ticket {:?}", e)); + } + } + // temporarily disable ticket erroring out + /* let ticket_id = create_support_ticket(config, "Bynar: Dead disk", &description)?; debug!("Recording ticket id {} in database", ticket_id); @@ -1251,7 +1282,7 @@ fn handle_operation_result( let mut operation_detail = OperationDetail::new(op_id, OperationType::WaitingForReplacement); operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; + add_or_update_operation_detail(pool, &mut operation_detail)?;*/ return Ok(()); } return Err(BynarError::from(format!( @@ -1682,3 +1713,51 @@ fn main() { ) .expect("Unable to connect to slack"); } + + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + // This tests the filter(s) used to get a list of devices + fn test_filter_block_devices() { + let dev = block_utils::get_block_devices().unwrap(); + //print the list of devices + println!("Devices before filtering: \n{:#?}", dev); + + let devices: Vec = dev.into_iter().filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("sr") + } else { + true + }) + }) + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") + } else { + true + }) + }) + .collect(); + + println!("Devices after filtering: \n{:#?}", devices); + //double check there are no paths that start with sr or loop + assert_eq!(None, devices.into_iter().find(|b| { + if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") || p.to_string_lossy().starts_with("sr") + }else { + true + } + })) + } + + #[test] + // Note: this isn't testing the actual function, since we can't do that, + // this is testing the expected behavior of parts inside the function assuming certain call result + fn test_create_msg_map(){ + let devices = block_utils.get_block_devices().unwrap(); + } +} \ No newline at end of file diff --git a/src/test_disk.rs b/src/test_disk.rs index 34fc96c..b3f7c5e 100644 --- a/src/test_disk.rs +++ b/src/test_disk.rs @@ -163,6 +163,7 @@ mod tests { id: Some(drive_id), name: dev.file_name().unwrap().to_str().unwrap().to_string(), media_type: super::MediaType::Rotational, + device_type: DeviceType::Partition, capacity: 26214400, fs_type: super::FilesystemType::Xfs, serial_number: 
Some("123456".into()), @@ -219,6 +220,7 @@ mod tests { id: Some(drive_id), name: dev.file_name().unwrap().to_str().unwrap().to_string(), media_type: super::MediaType::Rotational, + device_type: DeviceType::Partition, capacity: 26214400, fs_type: super::FilesystemType::Xfs, serial_number: Some("123456".into()), @@ -274,6 +276,7 @@ mod tests { id: Some(drive_id), name: dev.file_name().unwrap().to_str().unwrap().to_string(), media_type: super::MediaType::Rotational, + device_type: DiskType:: capacity: 26214400, fs_type: super::FilesystemType::Xfs, serial_number: Some("123456".into()), @@ -775,8 +778,8 @@ impl Transition for Scan { (false, _) => match run_smart_checks(&Path::new(&device.dev_path)) { Ok(stat) => { device.smart_passed = stat; - // If the device is a Disk, then end the state machine here. - if device.device.device_type == DeviceType::Disk { + // If the device is a Disk, and is not mounted then end the state machine here. + if device.device.device_type == DeviceType::Disk && !block_utils::is_mounted(&device.dev_path)? { if stat { debug!("Disk is healthy"); return State::Good; From 6b71682e6207a35ebc1a8ef4f78625c17628e5ba Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 19 Feb 2020 10:27:49 -0500 Subject: [PATCH 41/76] Added unit testing for filters and creating the request map --- src/main.rs | 46 ++++++++++++++++++++++++++++++++++++++++++++-- src/test_disk.rs | 9 +++++---- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/src/main.rs b/src/main.rs index 615a17d..bf774d7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1719,6 +1719,7 @@ fn main() { #[cfg(test)] mod tests { use super::*; + use block_utils::*; #[test] // This tests the filter(s) used to get a list of devices @@ -1757,7 +1758,48 @@ mod tests { #[test] // Note: this isn't testing the actual function, since we can't do that, // this is testing the expected behavior of parts inside the function assuming certain call result - fn test_create_msg_map(){ - let devices = block_utils.get_block_devices().unwrap(); + fn test_create_msg_map_no_partitions(){ + let devices: Vec = block_utils::get_block_devices().unwrap().into_iter() + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("sr") + } else { + true + }) + }) + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") + } else { + true + }) + }) + .collect(); + println!("List of devices: \n{:#?}", devices); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = Vec::new(); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(device.to_path_buf(), None); + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Created Hashmap: \n{:#?}", map); + + // check that for every device in devices, there is a hashmap + // in the map with just the device in it (there should be no partitions) + } } \ No newline at end of file diff --git a/src/test_disk.rs b/src/test_disk.rs index b3f7c5e..03916da 100644 --- a/src/test_disk.rs +++ b/src/test_disk.rs @@ -163,7 +163,7 @@ mod tests { id: Some(drive_id), name: dev.file_name().unwrap().to_str().unwrap().to_string(), media_type: super::MediaType::Rotational, - device_type: 
DeviceType::Partition, + device_type: super::DeviceType::Disk, capacity: 26214400, fs_type: super::FilesystemType::Xfs, serial_number: Some("123456".into()), @@ -220,7 +220,7 @@ mod tests { id: Some(drive_id), name: dev.file_name().unwrap().to_str().unwrap().to_string(), media_type: super::MediaType::Rotational, - device_type: DeviceType::Partition, + device_type: super::DeviceType::Disk, capacity: 26214400, fs_type: super::FilesystemType::Xfs, serial_number: Some("123456".into()), @@ -276,7 +276,7 @@ mod tests { id: Some(drive_id), name: dev.file_name().unwrap().to_str().unwrap().to_string(), media_type: super::MediaType::Rotational, - device_type: DiskType:: + device_type: super::DeviceType::Disk, capacity: 26214400, fs_type: super::FilesystemType::Xfs, serial_number: Some("123456".into()), @@ -323,6 +323,7 @@ mod tests { id: Some(drive_id), name: dev.file_name().unwrap().to_str().unwrap().to_string(), media_type: super::MediaType::Rotational, + device_type: super::DeviceType::Disk, capacity: 26214400, fs_type: super::FilesystemType::Xfs, serial_number: Some("123456".into()), @@ -779,7 +780,7 @@ impl Transition for Scan { Ok(stat) => { device.smart_passed = stat; // If the device is a Disk, and is not mounted then end the state machine here. - if device.device.device_type == DeviceType::Disk && !block_utils::is_mounted(&device.dev_path)? { + if device.device.device_type == DeviceType::Disk && !block_utils::is_mounted(&device.dev_path).unwrap() { if stat { debug!("Disk is healthy"); return State::Good; From b1109b0e833aebcc83492db4b5e9750572ad7752 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 19 Feb 2020 11:16:00 -0500 Subject: [PATCH 42/76] Unit tests for create_msg_map, creation without partitions, with partitions, with DB devices --- src/main.rs | 171 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 168 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index bf774d7..b07ebb3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1714,8 +1714,6 @@ fn main() { .expect("Unable to connect to slack"); } - - #[cfg(test)] mod tests { use super::*; @@ -1800,6 +1798,173 @@ mod tests { // check that for every device in devices, there is a hashmap // in the map with just the device in it (there should be no partitions) - + devices.iter().for_each(|path| { + let sub_map = map.get(&path.to_path_buf()); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(1, sub_map.len()); + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + } + + #[test] + // Note: this isn't testing the actual function, since we can't do that, + // this is testing the expected behavior of parts inside the function assuming certain call result + fn test_create_msg_map_with_partitions(){ + // since we want to test specifically the partitions we need an explicit device list + let devices: Vec = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdb"), PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdd")].to_vec(); + println!("List of devices: \n{:#?}", devices); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = [PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2"), PathBuf::from("/dev/sdc1"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); + println!("List of partitions: \n{:#?}", partitions); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(device.to_path_buf(), None); + // check if partition parent is 
device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Created Hashmap: \n{:#?}", map); + + // check that for every device in devices, there is a hashmap + // in the map with the device and all its partitions + let sda_map = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")].to_vec(); + let sdb_map = [PathBuf::from("/dev/sdb")].to_vec(); + let sdc_map = [PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1")].to_vec(); + let sdd_map = [PathBuf::from("/dev/sdd"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); + + //test sda + let sub_map = map.get(&PathBuf::from("/dev/sda")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(3, sub_map.len()); + sda_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + + //test sdb + let sub_map = map.get(&PathBuf::from("/dev/sdb")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(1, sub_map.len()); + sdb_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + + //test sdc + let sub_map = map.get(&PathBuf::from("/dev/sdc")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(2, sub_map.len()); + sdc_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + + //test sdd + let sub_map = map.get(&PathBuf::from("/dev/sdd")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(4, sub_map.len()); + sdd_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + } + + + #[test] + // Note: this isn't testing the actual function, since we can't do that, + // this is testing the expected behavior of parts inside the function assuming certain call result + fn test_create_msg_map_with_db(){ + // since we want to test specifically the partitions we need an explicit device list + let mut devices: Vec = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdb"), PathBuf::from("/dev/sdd")].to_vec(); + println!("List of devices: \n{:#?}", devices); + let mut map: HashMap>> = HashMap::new(); + let db_devices: Vec = [PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2"), PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); + let partitions: Vec = db_devices.clone().into_iter().filter(|p| { + p.to_string_lossy().chars().last().unwrap().is_digit(10) + }).collect(); + let mut disks: Vec = db_devices.into_iter().filter(|p| { + !p.to_string_lossy().chars().last().unwrap().is_digit(10) + }).collect(); + println!("List of DB partitions {:#?}", partitions); + assert_eq!([PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2"), PathBuf::from("/dev/sdc1"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(), partitions); + + println!("List of DB disks {:?}", disks); + assert_eq!([PathBuf::from("/dev/sdc")].to_vec(), disks); + + devices.append(&mut disks); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(device.to_path_buf(), None); + // check 
if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Created Hashmap: \n{:#?}", map); + + // check that for every device in devices, there is a hashmap + // in the map with the device and all its partitions + let sda_map = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")].to_vec(); + let sdb_map = [PathBuf::from("/dev/sdb")].to_vec(); + let sdc_map = [PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1")].to_vec(); + let sdd_map = [PathBuf::from("/dev/sdd"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); + + //test sda + let sub_map = map.get(&PathBuf::from("/dev/sda")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(3, sub_map.len()); + sda_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + + //test sdb + let sub_map = map.get(&PathBuf::from("/dev/sdb")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(1, sub_map.len()); + sdb_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + + //test sdc + let sub_map = map.get(&PathBuf::from("/dev/sdc")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(2, sub_map.len()); + sdc_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); + + //test sdd + let sub_map = map.get(&PathBuf::from("/dev/sdd")); + assert!(sub_map.is_some()); + let sub_map = sub_map.unwrap(); //this should be safe + assert_eq!(4, sub_map.len()); + sdd_map.iter().for_each(|path| { + assert!(sub_map.get(&path.to_path_buf()).is_some()); + }); } } \ No newline at end of file From dfc2111269c49562dbf52c344bc082e0e385bde0 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 20 Feb 2020 11:25:18 -0500 Subject: [PATCH 43/76] Rearranged the code in add_or_update_op for smaller function + easier to read and added tests for it --- src/main.rs | 591 ++++++++++++++++++++++++++++++++++------------- src/test_disk.rs | 6 +- 2 files changed, 434 insertions(+), 163 deletions(-) diff --git a/src/main.rs b/src/main.rs index b07ebb3..5ab8cc1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -146,101 +146,76 @@ fn create_msg_map( fn add_or_update_map_op( message_map: &mut HashMap>>, dev_path: &PathBuf, - op: DiskOp, + op: Option, ) -> BynarResult> { - if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { - //parent is in the map - if let Some(disk) = message_map.get_mut(&parent) { - if let Some(partition) = disk.clone().get(dev_path) { - // partition in map - disk.insert(dev_path.to_path_buf(), Some(op)); - return Ok(partition.clone()); - } - disk.insert(dev_path.to_path_buf(), Some(op)); + let (parent, dev_path) = + if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { + (parent, dev_path) } else { - //add to map - let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(parent.to_path_buf(), Some(op)); - let partitions = block_utils::get_block_partitions()?; - // check if partition parent is device - for partition in &partitions { - if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? 
{ - if disk == parent { - disk_map.insert(partition.to_path_buf(), None); - } - } - } - message_map.insert(parent.to_path_buf(), disk_map); - } - } else { - //not partition or partition destroyed - if dev_path.exists() { - //parent is in the map - if let Some(disk) = message_map.get_mut(dev_path) { - if let Some(partition) = disk.clone().get(dev_path) { - // partition in map - disk.insert(dev_path.to_path_buf(), Some(op)); - return Ok(partition.clone()); - } - disk.insert(dev_path.to_path_buf(), Some(op)); + if dev_path.exists() { + (dev_path.to_path_buf(), dev_path) } else { - //add to map - let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(dev_path.to_path_buf(), Some(op)); - let partitions = block_utils::get_block_partitions()?; - // check if partition parent is device - for partition in &partitions { - if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { - if &disk == dev_path { - disk_map.insert(partition.to_path_buf(), None); - } - } + // partition was destroyed...probably + // make parent path + let mut str_path = dev_path.to_string_lossy().to_string(); + while str_path.chars().last().unwrap().is_digit(10) { + str_path = str_path[0..str_path.len() - 1].to_string(); } - message_map.insert(dev_path.to_path_buf(), disk_map); - } - } else { - // partition was destroyed...probably - // make parent path - let path = dev_path.to_string_lossy(); - let path = &path[0..path.len() - 1]; - let path = PathBuf::from(path.to_string()); - if path.exists() { - //then make new entry to insert... - if let Some(disk) = message_map.get_mut(&path) { - // check if the partition is in the map - if let Some(partition) = disk.clone().get(dev_path) { - // partition in map - disk.insert(dev_path.to_path_buf(), Some(op)); - return Ok(partition.clone()); - } - disk.insert(dev_path.to_path_buf(), None); + let path = PathBuf::from(str_path.to_string()); + if path.exists() { + (path, dev_path) // partition probably } else { - //add to map - let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(path.to_path_buf(), None); - let partitions = block_utils::get_block_partitions()?; - // check if partition parent is device - for partition in &partitions { - if let Some(disk) = block_utils::get_parent_devpath_from_path(&partition)? { - if disk == path { - disk_map.insert(partition.to_path_buf(), None); - } - } + if str_path.starts_with("/dev/sd") + || str_path.starts_with("/dev/hd") + || str_path.starts_with("/dev/nvme") + { + (dev_path.to_path_buf(), dev_path) // disk...probably + } else { + // path just doesn't exist, so error... + error!( + "Path {} does not exist, nor does its parent.", + dev_path.display() + ); + return Err(BynarError::from(format!( + "Path {} does not exist, nor does its parent.", + dev_path.display() + ))); } - message_map.insert(path.to_path_buf(), disk_map); } - } else { - // path just doesn't exist, so error... 
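            // A note on the parent-path reconstruction used here: trimming trailing digits
            // turns "/dev/sdc2" into "/dev/sdc" and "/dev/hda12" into "/dev/hda", but an
            // nvme partition such as "/dev/nvme0n1p3" only trims to "/dev/nvme0n1p", which
            // is not an existing device node; it then falls through to the "/dev/nvme"
            // prefix check and gets treated as a disk path, which is presumably why the
            // nvme case is still flagged as needing its own handling.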
- error!( - "Path {} does not exist, nor does its parent.", - dev_path.display() - ); - return Err(BynarError::from(format!( - "Path {} does not exist, nor does its parent.", - dev_path.display() - ))); } + }; + if let Some(disk) = message_map.get_mut(&parent) { + if let Some(partition) = disk.clone().get(dev_path) { + // partition in map + disk.insert(dev_path.to_path_buf(), op); + return Ok(partition.clone()); } + if &parent == dev_path { + // if exists Some(disk) then dev_path should also exist (since creation) of entry in map requires it + error!("Map is missing the disk entry but disk {} exists in the map", parent.display()); + return Err(BynarError::from(format!( + "Map is missing the disk entry but disk {} exists in the map", parent.display() + ))) + } + disk.insert(dev_path.to_path_buf(), op); + } else { + //add to map + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(parent.to_path_buf(), None); + let partitions = block_utils::get_block_partitions()?; + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&parent.to_string_lossy().to_string()) + }) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + disk_map.insert(dev_path.to_path_buf(), op); + message_map.insert(parent.to_path_buf(), disk_map); } Ok(None) } @@ -514,11 +489,7 @@ fn check_for_failed_disks( if !state_machine.block_device.dev_path.exists() { //partition was deleted //add partition to the map - add_or_update_map_op( - message_map, - &state_machine.block_device.dev_path, - DiskOp::new(Operation::new(), None, None), - )?; + add_or_update_map_op(message_map, &state_machine.block_device.dev_path, None)?; } let disks = get_disk_map_op(message_map, &state_machine.block_device.dev_path)?; // uh, get list of keys in disks and filter usable list for keypath? @@ -586,12 +557,12 @@ fn check_for_failed_disks( } Some(i) => i, }; - let mut op = helpers::make_op!( + let op = helpers::make_op!( SafeToRemove, format!("{}", state_machine.block_device.dev_path.display()) ); let mess: (Operation, Option, Option) = (op, Some(desc.clone()), Some(op_id)); - let mut op2 = helpers::make_op!( + let op2 = helpers::make_op!( Remove, format!("{}", state_machine.block_device.dev_path.display()) ); @@ -943,7 +914,7 @@ fn send_and_update( trace!("add operation to map"); //check optype, make op let disk_op = DiskOp::new(mess, desc, op_id); - add_or_update_map_op(message_map, &path, disk_op)?; + add_or_update_map_op(message_map, &path, Some(disk_op))?; } Ok(()) } @@ -1036,7 +1007,7 @@ fn handle_operation_result( if let Some(mut current_op) = get_map_op(message_map, &dev_path)? { current_op.ret_val = Some(op_res); //push op back into map - add_or_update_map_op(message_map, &dev_path, current_op)?; + add_or_update_map_op(message_map, &dev_path, Some(current_op))?; return Ok(()); } // check if allll the other paths in disk are SafeToRemove (and not Success) @@ -1196,14 +1167,14 @@ fn handle_operation_result( host_info.hostname ), ); - } + } } } //update map if let Some(mut current_op) = get_map_op(message_map, &dev_path)? 
{ current_op.ret_val = Some(op_res); //push op back into map - add_or_update_map_op(message_map, &dev_path, current_op)?; + add_or_update_map_op(message_map, &dev_path, Some(current_op))?; } else { return Err(BynarError::from(format!( "{} on host {} does not have a currently running operation!", @@ -1719,60 +1690,69 @@ mod tests { use super::*; use block_utils::*; + // ------------------- Test create_msg_map ------------------ + #[test] // This tests the filter(s) used to get a list of devices fn test_filter_block_devices() { let dev = block_utils::get_block_devices().unwrap(); //print the list of devices println!("Devices before filtering: \n{:#?}", dev); - - let devices: Vec = dev.into_iter().filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("sr") - } else { - true + + let devices: Vec = dev + .into_iter() + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("sr") + } else { + true + }) }) - }) - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("loop") - } else { - true + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") + } else { + true + }) }) - }) - .collect(); + .collect(); println!("Devices after filtering: \n{:#?}", devices); //double check there are no paths that start with sr or loop - assert_eq!(None, devices.into_iter().find(|b| { - if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("loop") || p.to_string_lossy().starts_with("sr") - }else { - true - } - })) + assert_eq!( + None, + devices.into_iter().find(|b| { + if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") || p.to_string_lossy().starts_with("sr") + } else { + true + } + }) + ) } #[test] - // Note: this isn't testing the actual function, since we can't do that, + // Note: this isn't testing the actual function, since we can't do that, // this is testing the expected behavior of parts inside the function assuming certain call result - fn test_create_msg_map_no_partitions(){ - let devices: Vec = block_utils::get_block_devices().unwrap().into_iter() - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("sr") - } else { - true + fn test_create_msg_map_no_partitions() { + let devices: Vec = block_utils::get_block_devices() + .unwrap() + .into_iter() + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("sr") + } else { + true + }) }) - }) - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("loop") - } else { - true + .filter(|b| { + !(if let Some(p) = b.as_path().file_name() { + p.to_string_lossy().starts_with("loop") + } else { + true + }) }) - }) - .collect(); + .collect(); println!("List of devices: \n{:#?}", devices); let mut map: HashMap>> = HashMap::new(); let partitions: Vec = Vec::new(); @@ -1796,7 +1776,7 @@ mod tests { println!("Created Hashmap: \n{:#?}", map); - // check that for every device in devices, there is a hashmap + // check that for every device in devices, there is a hashmap // in the map with just the device in it (there should be no partitions) devices.iter().for_each(|path| { let sub_map = map.get(&path.to_path_buf()); @@ -1808,14 +1788,28 @@ mod tests { } #[test] - // Note: this isn't testing the actual function, since we can't do that, + // Note: this isn't testing the actual function, since we can't do that, // this is testing 
the expected behavior of parts inside the function assuming certain call result - fn test_create_msg_map_with_partitions(){ - // since we want to test specifically the partitions we need an explicit device list - let devices: Vec = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdb"), PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdd")].to_vec(); + fn test_create_msg_map_with_partitions() { + // since we want to test specifically the partitions we need an explicit device list + let devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); println!("List of devices: \n{:#?}", devices); let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2"), PathBuf::from("/dev/sdc1"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); + let partitions: Vec = [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); println!("List of partitions: \n{:#?}", partitions); devices.iter().for_each(|device| { // make a new hashmap @@ -1837,13 +1831,24 @@ mod tests { println!("Created Hashmap: \n{:#?}", map); - // check that for every device in devices, there is a hashmap + // check that for every device in devices, there is a hashmap // in the map with the device and all its partitions - let sda_map = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")].to_vec(); + let sda_map = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + ] + .to_vec(); let sdb_map = [PathBuf::from("/dev/sdb")].to_vec(); let sdc_map = [PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1")].to_vec(); - let sdd_map = [PathBuf::from("/dev/sdd"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); - + let sdd_map = [ + PathBuf::from("/dev/sdd"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + //test sda let sub_map = map.get(&PathBuf::from("/dev/sda")); assert!(sub_map.is_some()); @@ -1881,24 +1886,51 @@ mod tests { }); } - #[test] - // Note: this isn't testing the actual function, since we can't do that, + // Note: this isn't testing the actual function, since we can't do that, // this is testing the expected behavior of parts inside the function assuming certain call result - fn test_create_msg_map_with_db(){ - // since we want to test specifically the partitions we need an explicit device list - let mut devices: Vec = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdb"), PathBuf::from("/dev/sdd")].to_vec(); + fn test_create_msg_map_with_db() { + // since we want to test specifically the partitions we need an explicit device list + let mut devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); println!("List of devices: \n{:#?}", devices); let mut map: HashMap>> = HashMap::new(); - let db_devices: Vec = [PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2"), PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); - let partitions: Vec = db_devices.clone().into_iter().filter(|p| { - p.to_string_lossy().chars().last().unwrap().is_digit(10) - }).collect(); - let mut disks: Vec = 
db_devices.into_iter().filter(|p| { - !p.to_string_lossy().chars().last().unwrap().is_digit(10) - }).collect(); + let db_devices: Vec = [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + let partitions: Vec = db_devices + .clone() + .into_iter() + .filter(|p| p.to_string_lossy().chars().last().unwrap().is_digit(10)) + .collect(); + let mut disks: Vec = db_devices + .into_iter() + .filter(|p| !p.to_string_lossy().chars().last().unwrap().is_digit(10)) + .collect(); println!("List of DB partitions {:#?}", partitions); - assert_eq!([PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2"), PathBuf::from("/dev/sdc1"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(), partitions); + assert_eq!( + [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3") + ] + .to_vec(), + partitions + ); println!("List of DB disks {:?}", disks); assert_eq!([PathBuf::from("/dev/sdc")].to_vec(), disks); @@ -1924,13 +1956,24 @@ mod tests { println!("Created Hashmap: \n{:#?}", map); - // check that for every device in devices, there is a hashmap + // check that for every device in devices, there is a hashmap // in the map with the device and all its partitions - let sda_map = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")].to_vec(); + let sda_map = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + ] + .to_vec(); let sdb_map = [PathBuf::from("/dev/sdb")].to_vec(); let sdc_map = [PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1")].to_vec(); - let sdd_map = [PathBuf::from("/dev/sdd"), PathBuf::from("/dev/sdd1"), PathBuf::from("/dev/sdd2"), PathBuf::from("/dev/sdd3")].to_vec(); - + let sdd_map = [ + PathBuf::from("/dev/sdd"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + //test sda let sub_map = map.get(&PathBuf::from("/dev/sda")); assert!(sub_map.is_some()); @@ -1967,4 +2010,230 @@ mod tests { assert!(sub_map.get(&path.to_path_buf()).is_some()); }); } + + // ----------- Test the add_or_update_map_op function ------------- + #[test] + // test if, given a partition path that is not in the map (but the parent is) + // add the partition to the map with the given operation + fn test_add_or_update_map_op_partition_add() { + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(PathBuf::from("/dev/sda"), None); + map.insert(PathBuf::from("/dev/sda"), disk_map); + + println!("Initial Map: {:#?}", map); + let insert_path = PathBuf::from("/dev/sda1"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sda"); + assert!(map.get(&parent).unwrap().get(&insert_path).is_none()); + let disk = map.get_mut(&parent).unwrap(); // we know map should have this + if let Some(_) = disk.clone().get(&insert_path) { + // partition in map + panic!("/dev/sda1 should not be in the map"); + } + disk.insert(insert_path.to_path_buf(), Some(disk_op)); + + println!("New Map: {:#?}", map); + assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); + } + + #[test] + // test if, given a partition path that is in the map, update the map + // 
with the given operation + fn test_add_or_update_map_op_partition_update() { + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(PathBuf::from("/dev/sda"), None); + let mut op = Operation::new(); + op.set_Op_type(Op::Remove); + let disk_op = DiskOp::new(op, Some("test update".to_string()), Some(0)); + disk_map.insert(PathBuf::from("/dev/sda1"), Some(disk_op)); + map.insert(PathBuf::from("/dev/sda"), disk_map); + + println!("Initial Map: {:#?}", map); + let insert_path = PathBuf::from("/dev/sda1"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sda"); + assert!( + map.get(&parent) + .unwrap() + .get(&insert_path) + .unwrap() + .as_ref() + .unwrap() + .op_type + == Op::Remove + ); + let disk = map.get_mut(&parent).unwrap(); // we know map should have this + if let Some(_) = disk.clone().get(&insert_path) { + // partition in map + disk.insert(insert_path.to_path_buf(), Some(disk_op)); + } else { + panic!("/dev/sda1 should be in the map"); + } + println!("New Map: {:#?}", map); + assert!( + map.get(&parent) + .unwrap() + .get(&insert_path) + .unwrap() + .as_ref() + .unwrap() + .op_type + == Op::Add + ); + } + + #[test] + // test if, given a partition path that is not in the map and whose parent is not + // in the map, insert the partition + parent disk into the map + fn test_add_or_update_map_op_partition_insert() { + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(PathBuf::from("/dev/sda"), None); + map.insert(PathBuf::from("/dev/sda"), disk_map); + + println!("Initial Map: {:#?}", map); + let insert_path = PathBuf::from("/dev/sdb1"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sdb"); + assert!(map.get(&parent).is_none()); + let mut disk_map: HashMap> = HashMap::new(); // we know map doesn't have this + disk_map.insert(parent.to_path_buf(), None); + + let partitions = [PathBuf::from("/dev/sdb1"), PathBuf::from("/dev/sdb2")].to_vec(); + partitions + .iter() + .filter(|partition| partition.to_string_lossy().contains("/dev/sdb")) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + disk_map.insert(insert_path.to_path_buf(), Some(disk_op)); + map.insert(parent.to_path_buf(), disk_map); + println!("New Map: {:#?}", map); + assert!(map.get(&parent).is_some()); + assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); + assert!(map + .get(&parent) + .unwrap() + .get(&PathBuf::from("/dev/sdb2")) + .is_some()); + } + + #[test] + // test if, given a disk path that is not in the disk map (but is in the req map) + // this should error out + fn test_add_or_update_map_op_parent_error() { + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + map.insert(PathBuf::from("/dev/sda"), disk_map); + + println!("Initial Map: {:#?}", map); + let insert_path = PathBuf::from("/dev/sda"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sda"); + assert!(map.get(&parent).is_some()); + assert!(map.get(&parent).unwrap().get(&insert_path).is_none()); + if parent == insert_path{ + //success, error behavior is in here, in the actual function + println!("Function would error here"); + } + else{ + panic!("These should be equivalent..."); + } + } + + #[test] + // test if, given a disk path 
that is in the disk map + // update the disk map with the given operation + fn test_add_or_update_map_op_parent_update() { + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + let mut op = Operation::new(); + op.set_Op_type(Op::Remove); + let disk_op = DiskOp::new(op, Some("test update".to_string()), Some(0)); + disk_map.insert(PathBuf::from("/dev/sda"), Some(disk_op)); + map.insert(PathBuf::from("/dev/sda"), disk_map); + + println!("Initial Map: {:#?}", map); + let insert_path = PathBuf::from("/dev/sda"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sda"); + assert!( + map.get(&parent) + .unwrap() + .get(&insert_path) + .unwrap() + .as_ref() + .unwrap() + .op_type + == Op::Remove + ); + let disk = map.get_mut(&parent).unwrap(); // we know map should have this + if let Some(_) = disk.clone().get(&insert_path) { + // partition in map + disk.insert(insert_path.to_path_buf(), Some(disk_op)); + } else { + panic!("/dev/sda should be in the map"); + } + println!("New Map: {:#?}", map); + assert!( + map.get(&parent) + .unwrap() + .get(&insert_path) + .unwrap() + .as_ref() + .unwrap() + .op_type + == Op::Add + ); + } + + #[test] + // test if, given a disk path that is not in the disk map nor the req map + // create a new disk map with the disk path and insert into the req map + fn test_add_or_update_map_op_parent_insert() { + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(PathBuf::from("/dev/sda"), None); + map.insert(PathBuf::from("/dev/sda"), disk_map); + + println!("Initial Map: {:#?}", map); + let insert_path = PathBuf::from("/dev/sdb"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sdb"); + assert!(map.get(&parent).is_none()); + let mut disk_map: HashMap> = HashMap::new(); // we know map doesn't have this + disk_map.insert(parent.to_path_buf(), None); + + let partitions = [PathBuf::from("/dev/sdb1"), PathBuf::from("/dev/sdb2")].to_vec(); + partitions + .iter() + .filter(|partition| partition.to_string_lossy().contains("/dev/sdb")) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + disk_map.insert(insert_path.to_path_buf(), Some(disk_op)); + map.insert(parent.to_path_buf(), disk_map); + println!("New Map: {:#?}", map); + assert!(map.get(&parent).is_some()); + assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); + assert!(map + .get(&parent) + .unwrap() + .get(&PathBuf::from("/dev/sdb")) + .is_some()); + } } \ No newline at end of file diff --git a/src/test_disk.rs b/src/test_disk.rs index 03916da..c20988c 100644 --- a/src/test_disk.rs +++ b/src/test_disk.rs @@ -780,7 +780,9 @@ impl Transition for Scan { Ok(stat) => { device.smart_passed = stat; // If the device is a Disk, and is not mounted then end the state machine here. - if device.device.device_type == DeviceType::Disk && !block_utils::is_mounted(&device.dev_path).unwrap() { + if device.device.device_type == DeviceType::Disk + && !block_utils::is_mounted(&device.dev_path).unwrap() + { if stat { debug!("Disk is healthy"); return State::Good; @@ -1447,7 +1449,7 @@ pub fn check_all_disks( .cloned(); debug!("thread {} scsi_info: {:?}", process::id(), scsi_info); //Update device here? Create new device? 
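            // The ScsiInfo tuple's second element is unused in this branch, so the change
            // below just renames it to _opt to silence the unused-variable warning.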
- if let Some((i, opt)) = scsi_info.clone() { + if let Some((i, _opt)) = scsi_info.clone() { device.scsi_info = i; } debug!("thread {} device: {:?}", process::id(), device); From e030a1cb1af6fb30be799c21a64d8517f84743b5 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 20 Feb 2020 14:09:21 -0500 Subject: [PATCH 44/76] Moved repeat code into seperate key request function, squished down req-map operations --- src/main.rs | 218 +++++++++++++++++++++------------------------------- 1 file changed, 89 insertions(+), 129 deletions(-) diff --git a/src/main.rs b/src/main.rs index 5ab8cc1..693b1a0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -142,48 +142,52 @@ fn create_msg_map( Ok(map) } +// given a path, return a parent-child, or parent-parent tuple to +// look through the request map with, or error +fn get_request_keys(dev_path: &PathBuf) -> BynarResult<(PathBuf, &PathBuf)> { + if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { + Ok((parent, dev_path)) + } else { + if dev_path.exists() { + Ok((dev_path.to_path_buf(), dev_path)) + } else { + // partition was destroyed...probably + // make parent path + let mut str_path = dev_path.to_string_lossy().to_string(); + while str_path.chars().last().unwrap().is_digit(10) { + str_path = str_path[0..str_path.len() - 1].to_string(); + } + let path = PathBuf::from(str_path.to_string()); + if path.exists() { + Ok((path, dev_path)) // partition probably + } else { + if str_path.starts_with("/dev/sd") + || str_path.starts_with("/dev/hd") + || str_path.starts_with("/dev/nvme") + { + Ok((dev_path.to_path_buf(), dev_path)) // disk...probably + } else { + // path just doesn't exist, so error... + error!( + "Path {} does not exist, nor does its parent.", + dev_path.display() + ); + return Err(BynarError::from(format!( + "Path {} does not exist, nor does its parent.", + dev_path.display() + ))); + } + } + } + } +} // add or update an operation to the message map. If an operation is already ongoing, update op and return the old operation fn add_or_update_map_op( message_map: &mut HashMap>>, dev_path: &PathBuf, op: Option, ) -> BynarResult> { - let (parent, dev_path) = - if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { - (parent, dev_path) - } else { - if dev_path.exists() { - (dev_path.to_path_buf(), dev_path) - } else { - // partition was destroyed...probably - // make parent path - let mut str_path = dev_path.to_string_lossy().to_string(); - while str_path.chars().last().unwrap().is_digit(10) { - str_path = str_path[0..str_path.len() - 1].to_string(); - } - let path = PathBuf::from(str_path.to_string()); - if path.exists() { - (path, dev_path) // partition probably - } else { - if str_path.starts_with("/dev/sd") - || str_path.starts_with("/dev/hd") - || str_path.starts_with("/dev/nvme") - { - (dev_path.to_path_buf(), dev_path) // disk...probably - } else { - // path just doesn't exist, so error... 
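        // The duplicated parent/child path resolution being removed here is what the new
        // get_request_keys above now provides, so add_or_update_map_op, get_map_op,
        // remove_map_op and get_disk_map_op can all share one code path. For example,
        // assuming /dev/sda1 is a live partition of /dev/sda:
        //     let dev = PathBuf::from("/dev/sda1");
        //     let (parent, child) = get_request_keys(&dev)?;
        //     // parent == PathBuf::from("/dev/sda"), child == &dev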
- error!( - "Path {} does not exist, nor does its parent.", - dev_path.display() - ); - return Err(BynarError::from(format!( - "Path {} does not exist, nor does its parent.", - dev_path.display() - ))); - } - } - } - }; + let (parent, dev_path) = get_request_keys(dev_path)?; if let Some(disk) = message_map.get_mut(&parent) { if let Some(partition) = disk.clone().get(dev_path) { // partition in map @@ -192,10 +196,14 @@ fn add_or_update_map_op( } if &parent == dev_path { // if exists Some(disk) then dev_path should also exist (since creation) of entry in map requires it - error!("Map is missing the disk entry but disk {} exists in the map", parent.display()); + error!( + "Map is missing the disk entry but disk {} exists in the map", + parent.display() + ); return Err(BynarError::from(format!( - "Map is missing the disk entry but disk {} exists in the map", parent.display() - ))) + "Map is missing the disk entry but disk {} exists in the map", + parent.display() + ))); } disk.insert(dev_path.to_path_buf(), op); } else { @@ -225,36 +233,12 @@ fn get_map_op( message_map: &HashMap>>, dev_path: &PathBuf, ) -> BynarResult> { - if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { - //parent is in the map - if let Some(disk) = message_map.get(&parent) { - if let Some(partition) = disk.get(dev_path) { - // partition in map - return Ok(partition.clone()); - } - } - } else { - if dev_path.exists() { - //not partition - //parent is in the map - if let Some(disk) = message_map.get(dev_path) { - if let Some(partition) = disk.get(dev_path) { - // partition in map - return Ok(partition.clone()); - } - } - } else { - // partition was destroyed...probably - // make parent path - let path = dev_path.to_string_lossy(); - let path = &path[0..path.len() - 1]; - let path = PathBuf::from(path.to_string()); - if let Some(disk) = message_map.get(&path) { - if let Some(partition) = disk.get(dev_path) { - // partition in map - return Ok(partition.clone()); - } - } + let (parent, dev_path) = get_request_keys(dev_path)?; + //parent is in the map + if let Some(disk) = message_map.get(&parent) { + if let Some(partition) = disk.get(dev_path) { + // partition in map + return Ok(partition.clone()); } } Ok(None) @@ -266,40 +250,15 @@ fn remove_map_op( message_map: &mut HashMap>>, dev_path: &PathBuf, ) -> BynarResult> { - if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? 
{ - //parent is in the map - if let Some(disk) = message_map.get_mut(&parent) { - if let Some(partition) = disk.clone().get(dev_path) { - //set point as None - disk.insert(dev_path.to_path_buf(), None); - // partition in map - return Ok(partition.clone()); - } - } - } else { - if dev_path.exists() { - //not partition - //parent is in the map - if let Some(disk) = message_map.get_mut(dev_path) { - if let Some(partition) = disk.clone().get(dev_path) { - // partition in map - disk.insert(dev_path.to_path_buf(), None); - return Ok(partition.clone()); - } - } - } else { - // partition was destroyed...probably - // make parent path - let path = dev_path.to_string_lossy(); - let path = &path[0..path.len() - 1]; - let path = PathBuf::from(path.to_string()); - if let Some(disk) = message_map.get_mut(&path) { - if let Some(partition) = disk.clone().get(dev_path) { - // partition in map - disk.insert(dev_path.to_path_buf(), None); - return Ok(partition.clone()); - } - } + let (parent, dev_path) = get_request_keys(dev_path)?; + + //parent is in the map + if let Some(disk) = message_map.get_mut(&parent) { + if let Some(partition) = disk.clone().get(dev_path) { + //set point as None + disk.insert(dev_path.to_path_buf(), None); + // partition in map + return Ok(partition.clone()); } } Err(BynarError::from(format!( @@ -313,27 +272,10 @@ fn get_disk_map_op( message_map: &mut HashMap>>, dev_path: &PathBuf, ) -> BynarResult>> { - if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { - //parent is in the map - if let Some(disk) = message_map.get(&parent) { - return Ok(disk.clone()); - } - } else { - //parent is in the map - if dev_path.exists() { - if let Some(disk) = message_map.get(dev_path) { - return Ok(disk.clone()); - } - } else { - // partition was destroyed...probably - // make parent path - let path = dev_path.to_string_lossy(); - let path = &path[0..path.len() - 1]; - let path = PathBuf::from(path.to_string()); - if let Some(disk) = message_map.get(&path) { - return Ok(disk.clone()); - } - } + let (parent, _) = get_request_keys(dev_path)?; + //parent is in the map + if let Some(disk) = message_map.get(&parent) { + return Ok(disk.clone()); } Err(BynarError::from(format!( "Path {} is not a disk in the map", @@ -2133,7 +2075,7 @@ mod tests { let mut map: HashMap>> = HashMap::new(); let mut disk_map: HashMap> = HashMap::new(); map.insert(PathBuf::from("/dev/sda"), disk_map); - + println!("Initial Map: {:#?}", map); let insert_path = PathBuf::from("/dev/sda"); let op = Operation::new(); @@ -2142,11 +2084,10 @@ mod tests { let parent = PathBuf::from("/dev/sda"); assert!(map.get(&parent).is_some()); assert!(map.get(&parent).unwrap().get(&insert_path).is_none()); - if parent == insert_path{ + if parent == insert_path { //success, error behavior is in here, in the actual function println!("Function would error here"); - } - else{ + } else { panic!("These should be equivalent..."); } } @@ -2236,4 +2177,23 @@ mod tests { .get(&PathBuf::from("/dev/sdb")) .is_some()); } -} \ No newline at end of file + + #[test] + // test if getting the parent from a deleted partition path works + fn test_get_parent_from_deleted_partition() { + let path = PathBuf::from("/dev/sdc2"); + let hd_path = PathBuf::from("/dev/hda12"); + let nvme_path = PathBuf::from("/dev/nvme0n1p3"); // test this one once nvme implemented + let mut str_path = path.to_string_lossy().to_string(); + while str_path.chars().last().unwrap().is_digit(10) { + str_path = str_path[0..str_path.len() - 1].to_string(); + } + 
assert_eq!("/dev/sdc".to_string(), str_path); + + let mut str_path = hd_path.to_string_lossy().to_string(); + while str_path.chars().last().unwrap().is_digit(10) { + str_path = str_path[0..str_path.len() - 1].to_string(); + } + assert_eq!("/dev/hda".to_string(), str_path); + } +} From 2b1621d43824f2db00eb19f905e5b70f5b9ceb93 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 20 Feb 2020 14:53:41 -0500 Subject: [PATCH 45/76] Add unit tests for the other req map functions --- src/main.rs | 97 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 85 insertions(+), 12 deletions(-) diff --git a/src/main.rs b/src/main.rs index 693b1a0..6ea44ab 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1963,7 +1963,7 @@ mod tests { disk_map.insert(PathBuf::from("/dev/sda"), None); map.insert(PathBuf::from("/dev/sda"), disk_map); - println!("Initial Map: {:#?}", map); + println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sda1"); let op = Operation::new(); let disk_op = DiskOp::new(op, Some("test".to_string()), None); @@ -1977,7 +1977,7 @@ mod tests { } disk.insert(insert_path.to_path_buf(), Some(disk_op)); - println!("New Map: {:#?}", map); + println!("New Map: \n{:#?}", map); assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); } @@ -1994,7 +1994,7 @@ mod tests { disk_map.insert(PathBuf::from("/dev/sda1"), Some(disk_op)); map.insert(PathBuf::from("/dev/sda"), disk_map); - println!("Initial Map: {:#?}", map); + println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sda1"); let op = Operation::new(); let disk_op = DiskOp::new(op, Some("test".to_string()), None); @@ -2017,7 +2017,7 @@ mod tests { } else { panic!("/dev/sda1 should be in the map"); } - println!("New Map: {:#?}", map); + println!("New Map: \n{:#?}", map); assert!( map.get(&parent) .unwrap() @@ -2039,7 +2039,7 @@ mod tests { disk_map.insert(PathBuf::from("/dev/sda"), None); map.insert(PathBuf::from("/dev/sda"), disk_map); - println!("Initial Map: {:#?}", map); + println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sdb1"); let op = Operation::new(); let disk_op = DiskOp::new(op, Some("test".to_string()), None); @@ -2058,7 +2058,7 @@ mod tests { }); disk_map.insert(insert_path.to_path_buf(), Some(disk_op)); map.insert(parent.to_path_buf(), disk_map); - println!("New Map: {:#?}", map); + println!("New Map: \n{:#?}", map); assert!(map.get(&parent).is_some()); assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); assert!(map @@ -2076,7 +2076,7 @@ mod tests { let mut disk_map: HashMap> = HashMap::new(); map.insert(PathBuf::from("/dev/sda"), disk_map); - println!("Initial Map: {:#?}", map); + println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sda"); let op = Operation::new(); let disk_op = DiskOp::new(op, Some("test".to_string()), None); @@ -2104,7 +2104,7 @@ mod tests { disk_map.insert(PathBuf::from("/dev/sda"), Some(disk_op)); map.insert(PathBuf::from("/dev/sda"), disk_map); - println!("Initial Map: {:#?}", map); + println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sda"); let op = Operation::new(); let disk_op = DiskOp::new(op, Some("test".to_string()), None); @@ -2127,7 +2127,7 @@ mod tests { } else { panic!("/dev/sda should be in the map"); } - println!("New Map: {:#?}", map); + println!("New Map: \n{:#?}", map); assert!( map.get(&parent) .unwrap() @@ -2149,7 +2149,7 @@ mod tests { disk_map.insert(PathBuf::from("/dev/sda"), None); map.insert(PathBuf::from("/dev/sda"), disk_map); - 
println!("Initial Map: {:#?}", map); + println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sdb"); let op = Operation::new(); let disk_op = DiskOp::new(op, Some("test".to_string()), None); @@ -2168,7 +2168,7 @@ mod tests { }); disk_map.insert(insert_path.to_path_buf(), Some(disk_op)); map.insert(parent.to_path_buf(), disk_map); - println!("New Map: {:#?}", map); + println!("New Map: \n{:#?}", map); assert!(map.get(&parent).is_some()); assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); assert!(map @@ -2180,7 +2180,7 @@ mod tests { #[test] // test if getting the parent from a deleted partition path works - fn test_get_parent_from_deleted_partition() { + fn test_get_request_keys_deleted() { let path = PathBuf::from("/dev/sdc2"); let hd_path = PathBuf::from("/dev/hda12"); let nvme_path = PathBuf::from("/dev/nvme0n1p3"); // test this one once nvme implemented @@ -2196,4 +2196,77 @@ mod tests { } assert_eq!("/dev/hda".to_string(), str_path); } + + #[test] + // test get_map_op function + fn test_get_map_op() { + //make map + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + let insert_path = PathBuf::from("/dev/sda1"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sda"); + assert!(map.get(&parent).is_none()); + let mut disk_map: HashMap> = HashMap::new(); // we know map doesn't have this + disk_map.insert(parent.to_path_buf(), None); + disk_map.insert(insert_path.to_path_buf(), Some(disk_op)); + map.insert(parent.to_path_buf(), disk_map); + println!("Map: \n{:#?}", map); + + assert!(get_map_op(&map, &PathBuf::from("/dev/sda")).unwrap().is_none()); + assert!(get_map_op(&map, &PathBuf::from("/dev/sda1")).unwrap().is_some()); + } + + #[test] + // test remove_map_op function + fn test_remove_map_op() { + //make map + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + let insert_path = PathBuf::from("/dev/sda1"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sda"); + assert!(map.get(&parent).is_none()); + let mut disk_map: HashMap> = HashMap::new(); // we know map doesn't have this + disk_map.insert(parent.to_path_buf(), None); + disk_map.insert(insert_path.to_path_buf(), Some(disk_op)); + map.insert(parent.to_path_buf(), disk_map); + println!("Map: \n{:#?}", map); + + assert!(map.get(&parent).unwrap().get(&insert_path).unwrap().is_some()); + remove_map_op(&mut map, &insert_path); + assert!(map.get(&parent).unwrap().get(&insert_path).unwrap().is_none()); + println!("After Removal: \n{:#?}", map); + } + + #[test] + // test get_disk_map_op + fn test_get_disk_map_op(){ + //make map + let mut map: HashMap>> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + let insert_path = PathBuf::from("/dev/sda1"); + let op = Operation::new(); + let disk_op = DiskOp::new(op, Some("test".to_string()), None); + + let parent = PathBuf::from("/dev/sda"); + assert!(map.get(&parent).is_none()); + let mut disk_map: HashMap> = HashMap::new(); // we know map doesn't have this + disk_map.insert(parent.to_path_buf(), None); + disk_map.insert(insert_path.to_path_buf(), Some(disk_op)); + map.insert(parent.to_path_buf(), disk_map); + println!("Map: \n{:#?}", map); + + let req_disk_map = get_disk_map_op(&mut map, &insert_path).unwrap(); + assert_eq!(2, req_disk_map.len()); + assert!(req_disk_map.get(&insert_path).is_some()); + 
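A condensed sketch of the two-level request-map lookups that get_map_op, remove_map_op, and get_disk_map_op perform once get_request_keys has resolved a (parent, dev_path) pair, mirroring what test_get_map_op, test_remove_map_op, and test_get_disk_map_op assert; a String stands in for DiskOp here purely for illustration.

use std::collections::HashMap;
use std::path::PathBuf;

fn main() {
    let mut map: HashMap<PathBuf, HashMap<PathBuf, Option<String>>> = HashMap::new();
    let parent = PathBuf::from("/dev/sda");
    let partition = PathBuf::from("/dev/sda1");

    let mut disk_map: HashMap<PathBuf, Option<String>> = HashMap::new();
    disk_map.insert(parent.clone(), None);
    disk_map.insert(partition.clone(), Some("pending op".to_string()));
    map.insert(parent.clone(), disk_map);

    // get_map_op: clone the entry without touching the map
    let current = map.get(&parent).and_then(|disk| disk.get(&partition)).cloned();
    assert_eq!(current, Some(Some("pending op".to_string())));

    // remove_map_op: hand back the entry and reset it to None in place
    let removed = map
        .get_mut(&parent)
        .and_then(|disk| disk.insert(partition.clone(), None));
    assert_eq!(removed, Some(Some("pending op".to_string())));
    assert_eq!(map[&parent][&partition], None);

    // get_disk_map_op: clone the whole per-disk map (disk entry plus partitions)
    let disk_map = map.get(&parent).cloned().unwrap();
    assert_eq!(disk_map.len(), 2);
}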
assert!(req_disk_map.get(&insert_path).unwrap().is_some()); + assert!(req_disk_map.get(&parent).is_some()); + assert!(req_disk_map.get(&parent).unwrap().is_none()); + + } } From 6947fda74f4eafc0c763c945ef6ed09cc922877f Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 21 Feb 2020 11:11:22 -0500 Subject: [PATCH 46/76] check filtering waiting for replacement disks from state machine output --- src/main.rs | 279 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 274 insertions(+), 5 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6ea44ab..6b7f784 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2215,8 +2215,12 @@ mod tests { map.insert(parent.to_path_buf(), disk_map); println!("Map: \n{:#?}", map); - assert!(get_map_op(&map, &PathBuf::from("/dev/sda")).unwrap().is_none()); - assert!(get_map_op(&map, &PathBuf::from("/dev/sda1")).unwrap().is_some()); + assert!(get_map_op(&map, &PathBuf::from("/dev/sda")) + .unwrap() + .is_none()); + assert!(get_map_op(&map, &PathBuf::from("/dev/sda1")) + .unwrap() + .is_some()); } #[test] @@ -2237,15 +2241,25 @@ mod tests { map.insert(parent.to_path_buf(), disk_map); println!("Map: \n{:#?}", map); - assert!(map.get(&parent).unwrap().get(&insert_path).unwrap().is_some()); + assert!(map + .get(&parent) + .unwrap() + .get(&insert_path) + .unwrap() + .is_some()); remove_map_op(&mut map, &insert_path); - assert!(map.get(&parent).unwrap().get(&insert_path).unwrap().is_none()); + assert!(map + .get(&parent) + .unwrap() + .get(&insert_path) + .unwrap() + .is_none()); println!("After Removal: \n{:#?}", map); } #[test] // test get_disk_map_op - fn test_get_disk_map_op(){ + fn test_get_disk_map_op() { //make map let mut map: HashMap>> = HashMap::new(); let mut disk_map: HashMap> = HashMap::new(); @@ -2267,6 +2281,261 @@ mod tests { assert!(req_disk_map.get(&insert_path).unwrap().is_some()); assert!(req_disk_map.get(&parent).is_some()); assert!(req_disk_map.get(&parent).unwrap().is_none()); + } + + #[test] + // check filter disks that are Waiting for Replacement with map having None + // no in progress check since all paths should have None + fn test_get_replacing_vec_none() { + let devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(device.to_path_buf(), None); + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Initial Hashmap: \n{:#?}", map); + let states: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + // Testing, assuming /dev/sda and /dev/sdc1 are in "WaitingForReplacement" state + // no need to make in progress a variable since all map objects are None + let replacing: Vec<_> = states + .into_iter() + .filter(|path| { + if path == 
&PathBuf::from("/dev/sda") || path == &PathBuf::from("/dev/sdc1") { + // the two "Waiting for Replacement" states + //simulate get_map_op + let parent = if path == &PathBuf::from("/dev/sdc1") { + PathBuf::from("/dev/sdc") + } else { + path.to_path_buf() + }; + let op = map.get(&parent).unwrap().get(path).unwrap(); + match op { + Some(op) => panic!("Should be None"), + None => true, + } + } else { + false + } + }) + .collect(); + + println!("Replacing: {:#?}", replacing); + assert_eq!( + replacing, + [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec() + ); + } + #[test] + // check filter disks that are Waiting for Replacement with map having Add + // in progress yes or no + fn test_get_replacing_vec_add() { + let devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + let op = Operation::new(); + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(device.to_path_buf(), Some(disk_op)); + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + let op = Operation::new(); + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(partition.to_path_buf(), Some(disk_op)); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Initial Hashmap: \n{:#?}", map); + let states: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + // Testing, assuming /dev/sda and /dev/sdc1 are in "WaitingForReplacement" state + // in progress is now variable... 
+ let replacing: Vec<_> = states + .into_iter() + .filter(|path| { + if path == &PathBuf::from("/dev/sda") || path == &PathBuf::from("/dev/sdc1") { + // the two "Waiting for Replacement" states + //simulate get_map_op + let parent = if path == &PathBuf::from("/dev/sdc1") { + PathBuf::from("/dev/sdc") + } else { + path.to_path_buf() + }; + let in_progress = path == &PathBuf::from("/dev/sdc1"); //sdc1 in progress, sda is not + let op = map.get(&parent).unwrap().get(path).unwrap(); + match op { + Some(op) => { + !(op.op_type == Op::SafeToRemove + || op.op_type == Op::Remove + || in_progress) + } + None => panic!("Should be Some"), + } + } else { + false + } + }) + .collect(); + + println!("Replacing: {:#?}", replacing); + assert_eq!(replacing, [PathBuf::from("/dev/sda")]); + } + #[test] + // check filter disks that are Waiting for Replacement with map having SafeToRemove || Remove + fn test_get_replacing_vec_exists() { + let devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + let mut op = Operation::new(); + if device == &PathBuf::from("/dev/sda") { + op.set_Op_type(Op::SafeToRemove); + } + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(device.to_path_buf(), Some(disk_op)); + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + let mut op = Operation::new(); + if partition == &PathBuf::from("/dev/sdc1") { + op.set_Op_type(Op::Remove); + } + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(partition.to_path_buf(), Some(disk_op)); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Initial Hashmap: \n{:#?}", map); + let states: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + // Testing, assuming /dev/sda and /dev/sdc1 are in "WaitingForReplacement" state + // in progress is now variable... 
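The test_get_replacing_vec_* tests all exercise the same predicate: a path in the WaitingForReplacement state is queued for replacement only if its map entry is None, or holds an operation whose op_type is neither SafeToRemove nor Remove and which is not already in progress. A condensed sketch of that predicate follows, assuming cut-down stand-ins for the protobuf Op and DiskOp types that model only the op_type field the filter inspects.

#[derive(PartialEq)]
enum Op {
    Add,
    Remove,
    SafeToRemove,
}

struct DiskOp {
    op_type: Op,
}

// A device waiting for replacement is queued only when no conflicting
// operation is recorded for it and it is not already being worked on.
fn should_replace(entry: &Option<DiskOp>, in_progress: bool) -> bool {
    match entry {
        Some(op) => !(op.op_type == Op::SafeToRemove || op.op_type == Op::Remove || in_progress),
        None => true,
    }
}

fn main() {
    assert!(should_replace(&None, false));
    assert!(should_replace(&Some(DiskOp { op_type: Op::Add }), false));
    assert!(!should_replace(&Some(DiskOp { op_type: Op::Add }), true));
    assert!(!should_replace(&Some(DiskOp { op_type: Op::Remove }), false));
    assert!(!should_replace(&Some(DiskOp { op_type: Op::SafeToRemove }), false));
}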
+ let replacing: Vec = states + .into_iter() + .filter(|path| { + if path == &PathBuf::from("/dev/sda") || path == &PathBuf::from("/dev/sdc1") { + // the two "Waiting for Replacement" states + //simulate get_map_op + let parent = if path == &PathBuf::from("/dev/sdc1") { + PathBuf::from("/dev/sdc") + } else { + path.to_path_buf() + }; + let in_progress = path == &PathBuf::from("/dev/sdc1"); //sdc1 in progress, sda is not + let op = map.get(&parent).unwrap().get(path).unwrap(); + match op { + Some(op) => { + !(op.op_type == Op::SafeToRemove + || op.op_type == Op::Remove + || in_progress) + } + None => panic!("Should be Some"), + } + } else { + false + } + }) + .collect(); + println!("Replacing: {:#?}", replacing); + let empty: Vec = [].to_vec(); + assert_eq!(replacing, empty); } } From 0dad151e4801668f743e83f03b8d5e4d81111348 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 21 Feb 2020 15:50:37 -0500 Subject: [PATCH 47/76] Add unit tests for getting the right partitions/disks associated with the paths to be removed --- src/main.rs | 294 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 293 insertions(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 6b7f784..b7f9a22 100644 --- a/src/main.rs +++ b/src/main.rs @@ -388,7 +388,7 @@ fn check_for_failed_disks( .into_iter() .collect(); // separate the states into Ok and Errors - let usable_states: Vec<_> = match all_states { + let usable_states: Vec = match all_states { Ok(s) => s, Err(e) => { error!("check_all_disks failed with error: {:?}", e); @@ -2538,4 +2538,296 @@ mod tests { let empty: Vec = [].to_vec(); assert_eq!(replacing, empty); } + + #[test] + // test adding related partitions/disks to list + // map all nones + fn test_add_related_paths_none() { + // init the map + let devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + disk_map.insert(device.to_path_buf(), None); + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + disk_map.insert(partition.to_path_buf(), None); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Initial Hashmap: \n{:#?}", map); + + let states: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + // create list of "replacing paths" + let mut replacing = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec(); + println!("Initial Replacing: {:#?}", replacing); + // test adding paths + let mut add_replacing = Vec::new(); + replacing.iter().for_each(|path| { + let parent = if path == &PathBuf::from("/dev/sdc1") { + PathBuf::from("/dev/sdc") + } else { + path.to_path_buf() + }; + let disks = map.get(&parent).unwrap(); + let mut add: Vec<_> = states + .iter() + .filter(|state| { + if disks.contains_key(&state.to_path_buf()) { + match 
map.get(&parent).unwrap().get(&state.to_path_buf()).unwrap() { + Some(op) => panic!("all items in map should be NONE"), + None => true, + } + } else { + false + } + }) + .collect(); + add_replacing.append(&mut add); + }); + + println!("Added values: {:#?}", add_replacing); + let paths = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + ]; + paths.iter().for_each(|path| { + assert!(add_replacing.contains(&path)); + }); + } + + #[test] + // test adding related partitions/disks to list + // map all Add + fn test_add_related_paths_add() { + let devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + let op = Operation::new(); + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(device.to_path_buf(), Some(disk_op)); + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + let op = Operation::new(); + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(partition.to_path_buf(), Some(disk_op)); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Initial Hashmap: \n{:#?}", map); + let states: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + // create list of "replacing paths" + let mut replacing = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec(); + println!("Initial Replacing: {:#?}", replacing); + // test adding paths + let mut add_replacing = Vec::new(); + replacing.iter().for_each(|path| { + let parent = if path == &PathBuf::from("/dev/sdc1") { + PathBuf::from("/dev/sdc") + } else { + path.to_path_buf() + }; + let disks = map.get(&parent).unwrap(); + let in_progress = path == &PathBuf::from("/dev/sdc"); + let mut add: Vec<_> = states + .iter() + .filter(|state| { + if disks.contains_key(&state.to_path_buf()) { + match map.get(&parent).unwrap().get(&state.to_path_buf()).unwrap() { + Some(op) => { + !(op.op_type == Op::SafeToRemove + || op.op_type == Op::Remove + || in_progress) + } + None => panic!("all items in map should be SOME"), + } + } else { + false + } + }) + .collect(); + add_replacing.append(&mut add); + }); + + println!("Added values: {:#?}", add_replacing); + let paths = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sdc1"), + ]; + paths.iter().for_each(|path| { + assert!(add_replacing.contains(&path)); + }); + } + + #[test] + // test adding related partitions/disks to list + // map all SafeToRemove or Removes + fn test_add_related_paths_empty() { + let devices: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + let mut map: HashMap>> = HashMap::new(); + let partitions: Vec = [ + 
PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec(); + devices.iter().for_each(|device| { + // make a new hashmap + let mut disk_map: HashMap> = HashMap::new(); + let mut op = Operation::new(); + if !(device == &PathBuf::from("/dev/sda")) { + op.set_Op_type(Op::SafeToRemove); + } + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(device.to_path_buf(), Some(disk_op)); + // check if partition parent is device + partitions + .iter() + .filter(|partition| { + partition + .to_string_lossy() + .contains(&device.to_string_lossy().to_string()) + }) + .for_each(|partition| { + let mut op = Operation::new(); + if !(partition == &PathBuf::from("/dev/sdc1")) { + op.set_Op_type(Op::Remove); + } + let disk_op = DiskOp::new(op, None, None); + disk_map.insert(partition.to_path_buf(), Some(disk_op)); + }); + map.insert(device.to_path_buf(), disk_map); + }); + + println!("Initial Hashmap: \n{:#?}", map); + let states: Vec = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd"), + ] + .to_vec(); + // create list of "replacing paths" + let mut replacing = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec(); + println!("Initial Replacing: {:#?}", replacing); + // test adding paths + let mut add_replacing = Vec::new(); + replacing.iter().for_each(|path| { + let parent = if path == &PathBuf::from("/dev/sdc1") { + PathBuf::from("/dev/sdc") + } else { + path.to_path_buf() + }; + let disks = map.get(&parent).unwrap(); + let in_progress = path == &PathBuf::from("/dev/sdc"); + let mut add: Vec<_> = states + .iter() + .filter(|state| { + if disks.contains_key(&state.to_path_buf()) { + match map.get(&parent).unwrap().get(&state.to_path_buf()).unwrap() { + Some(op) => { + !(op.op_type == Op::SafeToRemove + || op.op_type == Op::Remove + || in_progress) + } + None => panic!("all items in map should be SOME"), + } + } else { + false + } + }) + .collect(); + add_replacing.append(&mut add); + }); + + println!("Added values: {:#?}", add_replacing); + let paths = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")]; + paths.iter().for_each(|path| { + assert!(add_replacing.contains(&path)); + }); + } } From 07240d8062597d26c82e1bd32db51bb56a0bd566 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 24 Feb 2020 09:36:55 -0500 Subject: [PATCH 48/76] added test for removing duplicates/sorting and reformatted files to decrease filesize --- src/backend/ceph.rs | 327 ++++++++-------------------------- src/client.rs | 13 +- src/create_support_ticket.rs | 33 +--- src/disk_manager.rs | 61 ++----- src/in_progress.rs | 150 +++++----------- src/lib/host_information.rs | 9 +- src/lib/lib.rs | 9 +- src/main.rs | 328 ++++++++++------------------------- src/test_disk.rs | 250 +++++--------------------- src/test_hardware.rs | 10 +- 10 files changed, 283 insertions(+), 907 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 76663bb..d02b658 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -206,9 +206,7 @@ fn choose_ceph_config(config_dir: Option<&Path>) -> BynarResult { // backfill_cap > 0, latency_cap > 0, pool_name is valid fn validate_config(config: &mut CephConfig, cluster_handle: &Rados) -> BynarResult<()> { if config.target_weight <= 0.0 { - return Err(BynarError::from( - "target weight 
is less than or equal to 0.0", - )); + return Err(BynarError::from("target weight is less than or equal to 0.0")); } if config.increment < 0.0 { warn!("input increment < 0, flipping to positive value"); @@ -277,11 +275,7 @@ impl CephBackend { let version: CephVersion = version_str.parse()?; validate_config(&mut deserialized, &cluster_handle)?; - Ok(CephBackend { - cluster_handle, - config: deserialized, - version, - }) + Ok(CephBackend { cluster_handle, config: deserialized, version }) } // add a bluestore without using LVM, dev_path should be the disk path (ensure it is the disk path with get_parent_dev) @@ -310,10 +304,7 @@ impl CephBackend { //mkdir /var/lib/ceph/osd/{clustername-osd} let mount_point = Path::new("/var/lib/ceph/osd").join(&format!("ceph-{}", osd_id)); if !mount_point.exists() { - debug!( - "Mount point {} doesn't exist. Creating.", - mount_point.display() - ); + debug!("Mount point {} doesn't exist. Creating.", mount_point.display()); create_dir(&mount_point)?; } // mount /dev/sdx1 to /var/lib/ceph/osd/{clustername-osd_id} @@ -352,13 +343,7 @@ impl CephBackend { "Adding OSD {} to crushmap under host {} with weight: {}", osd_id, host_info.hostname, osd_weight ); - osd_crush_add( - &self.cluster_handle, - osd_id, - osd_weight, - &host_info.hostname, - simulate, - )?; + osd_crush_add(&self.cluster_handle, osd_id, osd_weight, &host_info.hostname, simulate)?; // gradual weight //systemctl start enable_bluestore_manual(osd_id, simulate)?; @@ -410,10 +395,7 @@ impl CephBackend { // Mount the drive let mount_point = Path::new("/var/lib/ceph/osd").join(&format!("ceph-{}", new_osd_id)); if !mount_point.exists() { - debug!( - "Mount point {} doesn't exist. Creating.", - mount_point.display() - ); + debug!("Mount point {} doesn't exist. Creating.", mount_point.display()); create_dir(&mount_point)?; } // Write out osd fsid to a file @@ -427,18 +409,11 @@ impl CephBackend { // This finds that device and then we chown it so ceph can use it let backer_device = self.resolve_lvm_device(&lv_dev_name)?; debug!("Resolved lvm device to {}", backer_device.display()); - debug!( - "Symlinking {} to {}", - lv_dev_name.display(), - mount_point.join("block").display() - ); + debug!("Symlinking {} to {}", lv_dev_name.display(), mount_point.join("block").display()); symlink(&lv_dev_name, mount_point.join("block"))?; // Optionally symlink the journal if using one if let Some(journal) = &journal { - symlink( - &Path::new(&format!("{}", journal)), - mount_point.join("block.wal"), - )?; + symlink(&Path::new(&format!("{}", journal)), mount_point.join("block.wal"))?; let ceph_user = Passwd::from_name("ceph")? 
.ok_or_else(|| BynarError::from("ceph user id not found"))?; self.change_permissions(&[&Path::new(&format!("{}", journal))], &ceph_user)?; @@ -486,13 +461,7 @@ impl CephBackend { "Adding OSD {} to crushmap under host {} with weight: {}", new_osd_id, host_info.hostname, osd_weight ); - osd_crush_add( - &self.cluster_handle, - new_osd_id, - osd_weight, - &host_info.hostname, - simulate, - )?; + osd_crush_add(&self.cluster_handle, new_osd_id, osd_weight, &host_info.hostname, simulate)?; systemctl_enable(new_osd_id, &osd_fsid, simulate)?; setup_osd_init(new_osd_id, simulate)?; self.gradual_weight(new_osd_id, true, simulate)?; @@ -517,10 +486,7 @@ impl CephBackend { inode_size: Some(2048), force: true, }; - debug!( - "Formatting {:?} with XFS options: {:?}", - dev_path, xfs_options - ); + debug!("Formatting {:?} with XFS options: {:?}", dev_path, xfs_options); if !simulate { block_utils::format_block_device(dev_path, &xfs_options)?; let _ = settle_udev(); @@ -545,10 +511,7 @@ impl CephBackend { let mount_point = Path::new("/var/lib/ceph/osd/").join(format!("ceph-{}", new_osd_id)); if !simulate { if !mount_point.exists() { - debug!( - "Mount point {} doesn't exist. Creating.", - mount_point.display() - ); + debug!("Mount point {} doesn't exist. Creating.", mount_point.display()); create_dir(&mount_point)?; } block_utils::mount_device(&info, &mount_point)?; @@ -557,17 +520,7 @@ impl CephBackend { let journal = self.select_journal()?; // Format the osd with the osd filesystem - ceph_mkfs( - new_osd_id, - journal.as_ref(), - false, - None, - None, - None, - None, - None, - simulate, - )?; + ceph_mkfs(new_osd_id, journal.as_ref(), false, None, None, None, None, None, simulate)?; debug!("Creating ceph authorization entry"); osd_auth_add(&self.cluster_handle, new_osd_id, simulate)?; let auth_key = auth_get_key(&self.cluster_handle, "osd", &new_osd_id.to_string())?; @@ -580,13 +533,7 @@ impl CephBackend { "Adding OSD {} to crushmap under host {} with weight: {}", new_osd_id, host_info.hostname, osd_weight ); - osd_crush_add( - &self.cluster_handle, - new_osd_id, - osd_weight, - &host_info.hostname, - simulate, - )?; + osd_crush_add(&self.cluster_handle, new_osd_id, osd_weight, &host_info.hostname, simulate)?; add_osd_to_fstab(&info, new_osd_id, simulate)?; // This step depends on whether it's systemctl, upstart, etc setup_osd_init(new_osd_id, simulate)?; @@ -598,11 +545,7 @@ impl CephBackend { fn change_permissions(&self, paths: &[&Path], perms: &Passwd) -> BynarResult<()> { for p in paths { debug!("chown {} with {}:{}", p.display(), perms.uid, perms.gid); - chown( - *p, - Some(Uid::from_raw(perms.uid)), - Some(Gid::from_raw(perms.gid)), - )?; + chown(*p, Some(Uid::from_raw(perms.uid)), Some(Gid::from_raw(perms.gid)))?; } Ok(()) } @@ -639,14 +582,7 @@ impl CephBackend { // TODO: Why does this magic number work but using the entire size doesn't? 
let lv = vg.create_lv_linear(&lv_name, vg.get_size() - 10_485_760)?; - self.create_lvm_tags( - &lv, - &lv_dev_name, - &osd_fsid, - new_osd_id, - &info, - journal_device, - )?; + self.create_lvm_tags(&lv, &lv_dev_name, &osd_fsid, new_osd_id, &info, journal_device)?; Ok((lv_dev_name, vg.get_size())) } @@ -742,12 +678,7 @@ impl CephBackend { // set cluster with noscrub and nodeepscrub fn set_noscrub(&self, simulate: bool) -> BynarResult<()> { osd_set(&self.cluster_handle, &OsdOption::NoScrub, false, simulate)?; - osd_set( - &self.cluster_handle, - &OsdOption::NoDeepScrub, - false, - simulate, - )?; + osd_set(&self.cluster_handle, &OsdOption::NoDeepScrub, false, simulate)?; Ok(()) } @@ -764,10 +695,7 @@ impl CephBackend { //get osd metadata let osd_meta = osd_metadata_by_id(&self.cluster_handle, osd_id)?; match osd_meta.objectstore_meta { - ObjectStoreMeta::Bluestore { - bluefs_wal_partition_path, - .. - } => { + ObjectStoreMeta::Bluestore { bluefs_wal_partition_path, .. } => { if let Some(wal_path) = bluefs_wal_partition_path { return Ok(Some(Path::new(&wal_path).to_path_buf())); } @@ -799,11 +727,7 @@ impl CephBackend { if let Some(parent_path) = block_utils::get_parent_devpath_from_path(&journal_path)? { //check if parent device is in journal devices trace!("Parent path is {}", parent_path.display()); - let journal_devices = self - .config - .journal_devices - .clone() - .unwrap_or_else(|| vec![]); + let journal_devices = self.config.journal_devices.clone().unwrap_or_else(|| vec![]); for journal_device in journal_devices { if parent_path == journal_device.device { trace!("Parent device is in journal_device list"); @@ -931,25 +855,24 @@ impl CephBackend { let lvm = Lvm::new(None)?; lvm.scan()?; // Get the volume group that this device is associated with - let vol_group_name = match lvm - .vg_name_from_device(&dev_path.to_string_lossy())? - .ok_or_else(|| { + let vol_group_name = + match lvm.vg_name_from_device(&dev_path.to_string_lossy())?.ok_or_else(|| { BynarError::new(format!( "No volume group associated with block device: {}", dev_path.display() )) }) { - Ok(vg_group) => vg_group, - Err(e) => { - // This might be a filestore osd. Fall back possibly - if is_filestore(&dev_path)? { - self.remove_filestore_osd(dev_path, simulate)?; - return Ok(()); - } else { - return Err(e); + Ok(vg_group) => vg_group, + Err(e) => { + // This might be a filestore osd. Fall back possibly + if is_filestore(&dev_path)? 
{ + self.remove_filestore_osd(dev_path, simulate)?; + return Ok(()); + } else { + return Err(e); + } } - } - }; + }; debug!("Found volume group: {}", vol_group_name); let vg = lvm.vg_open(&vol_group_name, &OpenMode::Write)?; // Find the logical volume in that vol group @@ -1180,11 +1103,7 @@ impl CephBackend { let journal_size = u64::from_str(&self.cluster_handle.config_get("osd_journal_size")?)?; // The config file uses MB as the journal size let journal_size_mb = journal_size * 1024 * 1024; - let mut journal_devices = self - .config - .journal_devices - .clone() - .unwrap_or_else(|| vec![]); + let mut journal_devices = self.config.journal_devices.clone().unwrap_or_else(|| vec![]); // Sort by number of partitions journal_devices.sort_by_key(|j| j.num_partitions); // Clear any space that we can @@ -1196,11 +1115,7 @@ impl CephBackend { .filter(|d| match enough_free_space(&d.device, journal_size_mb) { Ok(enough) => enough, Err(e) => { - error!( - "Finding free space on {} failed: {:?}", - d.device.display(), - e - ); + error!("Finding free space on {} failed: {:?}", d.device.display(), e); false } }) @@ -1217,17 +1132,7 @@ impl CephBackend { //Measure latency using Rados' benchmark command fn get_latency(&self) -> BynarResult { let output_child = Command::new("rados") - .args(&[ - "-p", - &self.config.pool_name, - "bench", - "5", - "write", - "-t", - "1", - "-b", - "4096", - ]) + .args(&["-p", &self.config.pool_name, "bench", "5", "write", "-t", "1", "-b", "4096"]) .output()?; let output = String::from_utf8_lossy(&output_child.stdout).to_lowercase(); let lines: Vec<&str> = output.split('\n').collect(); @@ -1250,9 +1155,7 @@ impl CephBackend { } } } - Err(BynarError::from( - "benchmark output did not contain average latency", - )) + Err(BynarError::from("benchmark output did not contain average latency")) } // get the number of pgs currently backfilling @@ -1279,16 +1182,10 @@ impl CephBackend { trace!("get_current_weight: osd.{} has weight {}", osd_id, weight); return Ok(weight); } - return Err(BynarError::from(format!( - "Undefined crush weight for osd {}", - osd_id - ))); + return Err(BynarError::from(format!("Undefined crush weight for osd {}", osd_id))); } } - Err(BynarError::from(format!( - "Could not find Osd {} in crush map", - osd_id - ))) + Err(BynarError::from(format!("Could not find Osd {} in crush map", osd_id))) } // incrementally weight the osd. 
return true if reweight ongoing, false if finished @@ -1301,11 +1198,7 @@ impl CephBackend { let latency_cap = self.config.latency_cap; let backfill_cap = self.config.backfill_cap; let increment = self.config.increment; - let target_weight = if is_add { - self.config.target_weight - } else { - 0.0 - }; + let target_weight = if is_add { self.config.target_weight } else { 0.0 }; let crush_tree = osd_tree(&self.cluster_handle)?; let current_weight = self.get_current_weight(crush_tree, osd_id)?; @@ -1324,10 +1217,7 @@ impl CephBackend { while { let current_backfill = self.get_current_backfill()?; if current_backfill > backfill_cap { - warn!( - "Too many backfilling PGs {}, cap is {}", - current_backfill, backfill_cap - ); + warn!("Too many backfilling PGs {}, cap is {}", current_backfill, backfill_cap); } current_backfill > backfill_cap } {} @@ -1356,6 +1246,7 @@ impl CephBackend { Ok(true) } + // weight the osd slowly to the target weight so as not to introduce too // much latency into the cluster fn gradual_weight(&self, osd_id: u64, is_add: bool, simulate: bool) -> BynarResult<()> { @@ -1389,10 +1280,7 @@ impl Backend for CephBackend { } // check if the disk is already in the cluster if is_device_in_cluster(&self.cluster_handle, device)? { - debug!( - "Device {} is already in the cluster. Skipping", - device.display() - ); + debug!("Device {} is already in the cluster. Skipping", device.display()); return Ok(OpOutcome::SkipRepeat); } if self.version >= CephVersion::Luminous { @@ -1429,10 +1317,7 @@ impl Backend for CephBackend { } // check if the disk is already out of the cluster if !is_device_in_cluster(&self.cluster_handle, path_check)? { - debug!( - "Device {} is already out of the cluster. Skipping", - device.display() - ); + debug!("Device {} is already out of the cluster. Skipping", device.display()); return Ok(OpOutcome::SkipRepeat); } if self.version >= CephVersion::Luminous { @@ -1493,10 +1378,7 @@ impl Backend for CephBackend { get_osd_id_from_device(&self.cluster_handle, device)? }; // create and send the command to check if the osd is safe to remove - Ok(( - OpOutcome::Success, - osd_safe_to_destroy(&self.cluster_handle, osd_id), - )) + Ok((OpOutcome::Success, osd_safe_to_destroy(&self.cluster_handle, osd_id))) } } @@ -1509,19 +1391,13 @@ fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path) -> BynarResult< let osd_meta = osd_metadata(cluster_handle)?; for osd in osd_meta { match osd.objectstore_meta { - ObjectStoreMeta::Bluestore { - bluestore_bdev_partition_path, - .. - } => { + ObjectStoreMeta::Bluestore { bluestore_bdev_partition_path, .. } => { if bluestore_bdev_partition_path == path && osd.hostname == host { return Ok(true); } } - ObjectStoreMeta::Filestore { - backend_filestore_partition_path, - .. - } => { + ObjectStoreMeta::Filestore { backend_filestore_partition_path, .. } => { if backend_filestore_partition_path == path && osd.hostname == host { return Ok(true); } @@ -1558,19 +1434,13 @@ fn get_osd_id_from_device(cluster_handle: &Rados, dev_path: &Path) -> BynarResul let osd_meta = osd_metadata(cluster_handle)?; for osd in osd_meta { match osd.objectstore_meta { - ObjectStoreMeta::Bluestore { - bluestore_bdev_partition_path, - .. - } => { + ObjectStoreMeta::Bluestore { bluestore_bdev_partition_path, .. } => { if bluestore_bdev_partition_path == path && osd.hostname == host { return Ok(osd.id); } } - ObjectStoreMeta::Filestore { - backend_filestore_partition_path, - .. - } => { + ObjectStoreMeta::Filestore { backend_filestore_partition_path, .. 
} => { if backend_filestore_partition_path == path && osd.hostname == host { return Ok(osd.id); } @@ -1592,9 +1462,7 @@ fn get_osd_id_from_device(cluster_handle: &Rados, dev_path: &Path) -> BynarResul } } } - Err(BynarError::new( - "unable to find the osd in the osd metadata".to_string(), - )) + Err(BynarError::new("unable to find the osd in the osd metadata".to_string())) } fn save_keyring( @@ -1608,10 +1476,7 @@ fn save_keyring( let gid = gid.map(Gid::from_raw); let base_dir = Path::new("/var/lib/ceph/osd").join(&format!("ceph-{}", osd_id)); if !Path::new(&base_dir).exists() { - return Err(BynarError::new(format!( - "{} directory doesn't exist", - base_dir.display() - ))); + return Err(BynarError::new(format!("{} directory doesn't exist", base_dir.display()))); } debug!("Creating {}/keyring", base_dir.display()); if !simulate { @@ -1629,10 +1494,7 @@ fn add_osd_to_fstab( ) -> BynarResult<()> { let fstab = FsTab::default(); let fstab_entry = fstab::FsEntry { - fs_spec: format!( - "UUID={}", - device_info.id.unwrap().to_hyphenated().to_string() - ), + fs_spec: format!("UUID={}", device_info.id.unwrap().to_hyphenated().to_string()), mountpoint: PathBuf::from(&format!("/var/lib/ceph/osd/ceph-{}", osd_id)), vfs_type: device_info.fs_type.to_string(), mount_options: vec![ @@ -1747,9 +1609,7 @@ fn systemctl_disable(osd_id: u64, osd_uuid: &uuid::Uuid, simulate: bool) -> Byna debug!("cmd: systemctl {:?}", args); let output = Command::new("systemctl").args(&args).output()?; if !output.status.success() { - return Err(BynarError::new( - String::from_utf8_lossy(&output.stderr).into_owned(), - )); + return Err(BynarError::new(String::from_utf8_lossy(&output.stderr).into_owned())); } } Ok(()) @@ -1764,9 +1624,7 @@ fn systemctl_enable(osd_id: u64, osd_uuid: &uuid::Uuid, simulate: bool) -> Bynar debug!("cmd: systemctl {:?}", args); let output = Command::new("systemctl").args(&args).output()?; if !output.status.success() { - return Err(BynarError::new( - String::from_utf8_lossy(&output.stderr).into_owned(), - )); + return Err(BynarError::new(String::from_utf8_lossy(&output.stderr).into_owned())); } } Ok(()) @@ -1778,9 +1636,7 @@ fn systemctl_stop(osd_id: u64, simulate: bool) -> BynarResult<()> { debug!("cmd: systemctl {:?}", args); let output = Command::new("systemctl").args(&args).output()?; if !output.status.success() { - return Err(BynarError::new( - String::from_utf8_lossy(&output.stderr).into_owned(), - )); + return Err(BynarError::new(String::from_utf8_lossy(&output.stderr).into_owned())); } } Ok(()) @@ -1822,18 +1678,14 @@ fn setup_osd_init(osd_id: u64, simulate: bool) -> BynarResult<()> { } Ok(()) } - Daemon::Unknown => Err(BynarError::from( - "Unknown init system. Cannot start osd service", - )), + Daemon::Unknown => Err(BynarError::from("Unknown init system. 
Cannot start osd service")), } } fn settle_udev() -> BynarResult<()> { let output = Command::new("udevadm").arg("settle").output()?; if !output.status.success() { - return Err(BynarError::new( - String::from_utf8_lossy(&output.stderr).into_owned(), - )); + return Err(BynarError::new(String::from_utf8_lossy(&output.stderr).into_owned())); } Ok(()) } @@ -1851,9 +1703,8 @@ fn mkfs_osd_dir(dev_path: &str) -> BynarResult<()> { /// mount the osd directory on 100MB partition fn mount_osd_dir(dev_path: &str, mount_point: &Path) -> BynarResult<()> { - let status = Command::new("mount") - .args(&[dev_path, &format!("{}", mount_point.display())]) - .output()?; + let status = + Command::new("mount").args(&[dev_path, &format!("{}", mount_point.display())]).output()?; if !status.status.success() { return Err(BynarError::new(format!("Unable to mount {}", dev_path))); } @@ -1886,11 +1737,7 @@ fn symlink_bluestore_devices( /// add block device to udev rules if necessary fn add_block_to_udev(dev_path: &Path, udev_rule_path: &str) -> BynarResult<()> { let udev_path = Path::new(udev_rule_path); - let udev_rules = OpenOptions::new() - .read(true) - .append(true) - .create(true) - .open(udev_path)?; + let udev_rules = OpenOptions::new().read(true).append(true).create(true).open(udev_path)?; let reader = BufReader::new(udev_rules); let mut found = false; @@ -1901,16 +1748,11 @@ fn add_block_to_udev(dev_path: &Path, udev_rule_path: &str) -> BynarResult<()> { } } if !found { - let mut udev_rules = OpenOptions::new() - .read(true) - .append(true) - .create(true) - .open(udev_path)?; + let mut udev_rules = + OpenOptions::new().read(true).append(true).create(true).open(udev_path)?; udev_rules.write_all(&format!(r#"KERNEL="{}*", SUBSYSTEM=="block", ENV{{DEVTYPE}}=="partition", OWNER="ceph", GROUP="ceph", MODE="0660""#, dev.to_string_lossy()).as_bytes())?; // reload udev rules - Command::new("udevadm") - .args(&["control", "--reload-rules"]) - .output()?; + Command::new("udevadm").args(&["control", "--reload-rules"]).output()?; Command::new("udevadm").arg("trigger").output()?; } } @@ -1927,11 +1769,7 @@ fn ceph_chown(mount_point: &Path, simulate: bool) -> BynarResult<()> { .output()?; if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr).into_owned(); - error!( - "chown failed: {}. stderr: {}", - String::from_utf8_lossy(&output.stdout), - stderr - ); + error!("chown failed: {}. 
stderr: {}", String::from_utf8_lossy(&output.stdout), stderr); return Err(BynarError::new(stderr)); } Ok(()) @@ -1997,9 +1835,8 @@ fn enable_bluestore_manual(osd_id: u64, simulate: bool) -> BynarResult<()> { if simulate { return Ok(()); } - let output = Command::new("systemctl") - .args(&["enable", &format!("ceph-osd@{}", osd_id)]) - .output()?; + let output = + Command::new("systemctl").args(&["enable", &format!("ceph-osd@{}", osd_id)]).output()?; if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr).into_owned(); error!( @@ -2018,12 +1855,7 @@ fn zap_disk(dev_path: &Path, simulate: bool) -> BynarResult<()> { return Ok(()); } let output = Command::new("ceph-volume") - .args(&[ - "lvm", - "zap", - "--destroy", - &format!("{}", dev_path.display()), - ]) + .args(&["lvm", "zap", "--destroy", &format!("{}", dev_path.display())]) .output()?; if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr).into_owned(); @@ -2146,10 +1978,7 @@ fn create_bluestore_man_partitions(path: &Path) -> BynarResult<()> { if let Some(part1) = partitions.get(&1) { if (part1.last_lba - part1.first_lba + 1) * 512 != filesystem_size { //remove partition and then make new one - debug!( - "Remove partition {:?}", - disk.remove_partition(Some(1), None)? - ); + debug!("Remove partition {:?}", disk.remove_partition(Some(1), None)?); //add partition debug!( "Add partition {:?}", @@ -2178,10 +2007,7 @@ fn create_bluestore_man_partitions(path: &Path) -> BynarResult<()> { let last = header.last_usable; if part2.last_lba < last { //remove partition and then make new one - debug!( - "Remove Partition {:?}", - disk.remove_partition(Some(2), None)? - ); + debug!("Remove Partition {:?}", disk.remove_partition(Some(2), None)?); let first_end = match disk.partitions().get(&1) { Some(p1) => p1.last_lba, None => { @@ -2207,9 +2033,7 @@ fn create_bluestore_man_partitions(path: &Path) -> BynarResult<()> { Some(p1) => p1.last_lba, None => { error!("First partition does not exist!"); - return Err(BynarError::from( - "First partition does not exist!".to_string(), - )); + return Err(BynarError::from("First partition does not exist!".to_string())); } }; let size = last - first_end; @@ -2400,20 +2224,14 @@ fn is_filestore(dev_path: &Path) -> BynarResult { // Linux specific ioctl to update the partition table cache. fn update_partition_cache(device: &Path) -> BynarResult<()> { - debug!( - "Requesting kernel to refresh partition cache for {} ", - device.display() - ); + debug!("Requesting kernel to refresh partition cache for {} ", device.display()); let dev_path = device; let device = OpenOptions::new().read(true).write(true).open(device)?; //Occaisonally blkrrpart will fail, device busy etc. 
run partprobe instead match unsafe { blkrrpart(device.as_raw_fd()) } { Ok(ret) => { if ret != 0 { - Err(BynarError::new(format!( - "BLKRRPART ioctl failed with return code: {}", - ret, - ))) + Err(BynarError::new(format!("BLKRRPART ioctl failed with return code: {}", ret,))) } else { Ok(()) } @@ -2427,17 +2245,12 @@ fn update_partition_cache(device: &Path) -> BynarResult<()> { } fn part_probe(device: &Path) -> BynarResult<()> { - let output = Command::new("partprobe") - .arg(&format!("{}", device.display())) - .output()?; + let output = Command::new("partprobe").arg(&format!("{}", device.display())).output()?; if let Some(0) = output.status.code() { trace!("Partprobe successful!"); return Ok(()); } - Err(BynarError::new(format!( - "partprobe failed {:?}", - output.stderr - ))) + Err(BynarError::new(format!("partprobe failed {:?}", output.stderr))) } /// check if a device is in the list of SystemDisks diff --git a/src/client.rs b/src/client.rs index 0e73283..34a1325 100644 --- a/src/client.rs +++ b/src/client.rs @@ -337,20 +337,13 @@ fn get_cli_args(default_server_key: &str) -> ArgMatches<'_> { .takes_value(true), ), ) - .arg( - Arg::with_name("v") - .short("v") - .multiple(true) - .help("Sets the level of verbosity"), - ) + .arg(Arg::with_name("v").short("v").multiple(true).help("Sets the level of verbosity")) .get_matches() } fn main() { - let server_key = format!( - "/etc/bynar/{}.pem", - get_hostname().unwrap_or_else(|| "ecpubkey".to_string()) - ); + let server_key = + format!("/etc/bynar/{}.pem", get_hostname().unwrap_or_else(|| "ecpubkey".to_string())); let matches = get_cli_args(&server_key); let level = match matches.occurrences_of("v") { 0 => log::LevelFilter::Info, //default diff --git a/src/create_support_ticket.rs b/src/create_support_ticket.rs index 00759b6..3ee6e84 100644 --- a/src/create_support_ticket.rs +++ b/src/create_support_ticket.rs @@ -13,30 +13,18 @@ pub fn create_support_ticket( ) -> BynarResult { let issue_description = CreateIssue { fields: Fields { - assignee: Assignee { - name: settings.jira_ticket_assignee.clone(), - }, - components: vec![Component { - name: "Ceph".into(), - }], + assignee: Assignee { name: settings.jira_ticket_assignee.clone() }, + components: vec![Component { name: "Ceph".into() }], description: description.into(), - issuetype: IssueType { - id: settings.jira_issue_type.clone(), - }, - priority: Priority { - id: settings.jira_priority.clone(), - }, - project: Project { - key: settings.jira_project_id.clone(), - }, + issuetype: IssueType { id: settings.jira_issue_type.clone() }, + priority: Priority { id: settings.jira_priority.clone() }, + project: Project { key: settings.jira_project_id.clone() }, summary: title.into(), }, }; let jira: Jira = match settings.proxy { Some(ref url) => { - let client = reqwest::Client::builder() - .proxy(reqwest::Proxy::all(url)?) 
- .build()?; + let client = reqwest::Client::builder().proxy(reqwest::Proxy::all(url)?).build()?; Jira::from_client( settings.jira_host.to_string(), Credentials::Basic(settings.jira_user.clone(), settings.jira_password.clone()), @@ -50,10 +38,7 @@ pub fn create_support_ticket( }; let issue = Issues::new(&jira); - debug!( - "Creating JIRA ticket with information: {:?}", - issue_description - ); + debug!("Creating JIRA ticket with information: {:?}", issue_description); let results = issue.create(issue_description)?; Ok(results.id) } @@ -62,9 +47,7 @@ pub fn create_support_ticket( pub fn ticket_resolved(settings: &ConfigSettings, issue_id: &str) -> BynarResult { let jira: Jira = match settings.proxy { Some(ref url) => { - let client = reqwest::Client::builder() - .proxy(reqwest::Proxy::all(url)?) - .build()?; + let client = reqwest::Client::builder().proxy(reqwest::Proxy::all(url)?).build()?; Jira::from_client( settings.jira_host.to_string(), Credentials::Basic(settings.jira_user.clone(), settings.jira_password.clone()), diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 4e94047..f5552b2 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -134,18 +134,10 @@ fn op_insert(req_map: &mut HashMap>, op: &Operation) { // send a notification to slack channel (if config has webhook) fn notify_slack(config: &DiskManagerConfig, msg: &str) -> BynarResult<()> { let c = config.clone(); - let slack = Slack::new( - c.slack_webhook - .expect("slack webhook option is None") - .as_ref(), - )?; + let slack = Slack::new(c.slack_webhook.expect("slack webhook option is None").as_ref())?; let slack_channel = c.slack_channel.unwrap_or_else(|| "".to_string()); let bot_name = c.slack_botname.unwrap_or_else(|| "".to_string()); - let p = PayloadBuilder::new() - .text(msg) - .channel(slack_channel) - .username(bot_name) - .build()?; + let p = PayloadBuilder::new().text(msg).channel(slack_channel).username(bot_name).build()?; let res = slack.send(&p); match res { @@ -221,10 +213,7 @@ fn setup_curve(s: &Socket, config_dir: &Path, vault: bool) -> BynarResult<()> { } let endpoint = config.vault_endpoint.unwrap(); let token = config.vault_token.unwrap(); - debug!( - "Connecting to vault to save the public key to /bynar/{}.pem", - hostname - ); + debug!("Connecting to vault to save the public key to /bynar/{}.pem", hostname); let client = VaultClient::new(endpoint.as_str(), token)?; client.set_secret( format!("{}/{}.pem", config_dir.display(), hostname), @@ -286,9 +275,7 @@ fn listen( debug!("Listening on tcp://{}:5555", listen_address); // Fail to start if this fails setup_curve(&responder, config_dir, vault)?; - assert!(responder - .bind(&format!("tcp://{}:5555", listen_address)) - .is_ok()); + assert!(responder.bind(&format!("tcp://{}:5555", listen_address)).is_ok()); debug!("Building thread pool"); //Note, for now we are using 16 threads by default @@ -634,9 +621,8 @@ fn get_disks() -> BynarResult> { debug!("Gathering udev info on block devices"); // Gather info on all devices and skip Loopback devices - let device_info: Vec = block_utils::get_all_device_info(devices.as_slice())? 
- .into_iter() - .collect(); + let device_info: Vec = + block_utils::get_all_device_info(devices.as_slice())?.into_iter().collect(); debug!("Device info found: {:?}", device_info); debug!("Gathering partition info"); @@ -873,18 +859,8 @@ fn main() { .takes_value(true) .required(false), ) - .arg( - Arg::with_name("v") - .short("v") - .multiple(true) - .help("Sets the level of verbosity"), - ) - .arg( - Arg::with_name("daemon") - .help("Run Bynar as a daemon") - .long("daemon") - .required(false), - ) + .arg(Arg::with_name("v").short("v").multiple(true).help("Sets the level of verbosity")) + .arg(Arg::with_name("daemon").help("Run Bynar as a daemon").long("daemon").required(false)) .get_matches(); let daemon = matches.is_present("daemon"); let level = match matches.occurrences_of("v") { @@ -907,16 +883,9 @@ fn main() { //Sanity check let config_dir = Path::new(matches.value_of("configdir").unwrap()); if !config_dir.exists() { - warn!( - "Config directory {} doesn't exist. Creating", - config_dir.display() - ); + warn!("Config directory {} doesn't exist. Creating", config_dir.display()); if let Err(e) = create_dir(config_dir) { - error!( - "Unable to create directory {}: {}", - config_dir.display(), - e.to_string() - ); + error!("Unable to create directory {}: {}", config_dir.display(), e.to_string()); return; } } @@ -1026,10 +995,7 @@ fn main() { println!("Finished"); notify_slack( &config, - &format!( - "Disk-Manager Exited Successfully on host {}", - host_info.hostname - ), + &format!("Disk-Manager Exited Successfully on host {}", host_info.hostname), ) .expect("Unable to connect to slack"); } @@ -1037,10 +1003,7 @@ fn main() { println!("Error: {:?}", e); notify_slack( &config, - &format!( - "Disk-Manager Errored out on host {} with {:?}", - host_info.hostname, e - ), + &format!("Disk-Manager Errored out on host {} with {:?}", host_info.hostname, e), ) .expect("Unable to connect to slack"); } diff --git a/src/in_progress.rs b/src/in_progress.rs index 6153dc4..016db8f 100644 --- a/src/in_progress.rs +++ b/src/in_progress.rs @@ -93,10 +93,7 @@ mod tests { println!("Added operation with ID {}", o_id); // call add_disk_detail again for same device - println!( - "Re-adding same disk with id {} again to the database", - dev_id - ); + println!("Re-adding same disk with id {} again to the database", dev_id); let _disk_result2 = super::add_disk_detail(&pool, &mut d).unwrap(); // Clear device_database_id to mimic re-run and add again @@ -107,10 +104,7 @@ mod tests { None => 0, Some(i) => i, }; - println!( - "Dev-id after reinsert attempt {}, old {}", - new_dev_id, dev_id - ); + println!("Dev-id after reinsert attempt {}, old {}", new_dev_id, dev_id); // now update operation println!("Updating first operation with snapshot time"); @@ -156,10 +150,7 @@ mod tests { // get state again, and compare -- they should be same let new_state_result = super::get_state(&pool, &d).unwrap(); - println!( - "State for dev name {} is {:#?}", - d.device.name, new_state_result - ); + println!("State for dev name {} is {:#?}", d.device.name, new_state_result); assert_eq!(new_state, new_state_result); let tickets = @@ -173,11 +164,7 @@ mod tests { None, ) .unwrap(); - println!( - "disk {} needs repair {}", - d.dev_path.display(), - is_repair_needed - ); + println!("disk {} needs repair {}", d.dev_path.display(), is_repair_needed); let all_devices = super::get_devices_from_db(&pool, result.storage_detail_id).unwrap(); println!("All devices {:#?}", all_devices); @@ -209,12 +196,7 @@ impl DiskPendingTicket { device_path: 
String, device_id: i32, ) -> DiskPendingTicket { - DiskPendingTicket { - ticket_id, - device_name, - device_path, - device_id, - } + DiskPendingTicket { ticket_id, device_name, device_path, device_id } } } @@ -227,11 +209,7 @@ pub struct HostDetailsMapping { impl HostDetailsMapping { pub fn new(entry_id: u32, region_id: u32, storage_detail_id: u32) -> HostDetailsMapping { - HostDetailsMapping { - entry_id, - region_id, - storage_detail_id, - } + HostDetailsMapping { entry_id, region_id, storage_detail_id } } } @@ -260,12 +238,15 @@ impl OperationInfo { done_time: None, } } + fn set_operation_id(&mut self, op_id: u32) { self.operation_id = Some(op_id); } + pub fn set_done_time(&mut self, done_time: DateTime) { self.done_time = Some(done_time); } + pub fn set_snapshot_time(&mut self, snapshot_time: DateTime) { self.snapshot_time = snapshot_time; } @@ -336,6 +317,7 @@ impl OperationDetail { done_time: None, } } + fn set_operation_detail_id(&mut self, op_detail_id: u32) { self.op_detail_id = Some(op_detail_id); } @@ -370,10 +352,8 @@ pub fn create_db_connection_pool(db_config: &DBConfig) -> BynarResult BynarResult<()> { // Checks for the region in the database, inserts if it does not exist // and returns the region_id fn update_region(conn: &Transaction<'_>, region: &str) -> BynarResult { - let stmt = format!( - "SELECT region_id FROM regions WHERE region_name = '{}'", - region - ); + let stmt = format!("SELECT region_id FROM regions WHERE region_name = '{}'", region); let stmt_query = conn.query(&stmt, &[])?; let mut region_id: u32 = 0; @@ -571,11 +548,7 @@ pub fn add_disk_detail( let stmt_query = conn.query( "SELECT device_id FROM hardware WHERE device_path=$1 AND detail_id=$2 AND device_name=$3", - &[ - &format!("{}", disk_info.dev_path.display()), - &detail_id, - &disk_info.device.name, - ], + &[&format!("{}", disk_info.dev_path.display()), &detail_id, &disk_info.device.name], )?; if stmt_query.is_empty() { // A record doesn't exist, insert @@ -584,10 +557,8 @@ pub fn add_disk_detail( let mut hardware_type: i32 = 2; // this is the usual value added to DB for disk type // Get hardware_type id from DB - let stmt2 = conn.query( - "SELECT hardware_id FROM hardware_types WHERE hardware_type='disk'", - &[], - )?; + let stmt2 = + conn.query("SELECT hardware_id FROM hardware_types WHERE hardware_type='disk'", &[])?; if let Some(res) = stmt2.into_iter().next() { hardware_type = res.get("hardware_id"); } @@ -759,9 +730,7 @@ pub fn add_or_update_operation( op_info.set_operation_id(oid as u32); Ok(()) } else { - Err(BynarError::new( - "Query to insert operation into DB failed".to_string(), - )) + Err(BynarError::new("Query to insert operation into DB failed".to_string())) } } Some(_) => { @@ -868,25 +837,16 @@ pub fn save_state( device_detail: &BlockDevice, state: State, ) -> BynarResult<()> { - debug!( - "Saving state as {} for device {}", - state, device_detail.device.name - ); + debug!("Saving state as {} for device {}", state, device_detail.device.name); let conn = get_connection_from_pool(pool)?; if let Some(dev_id) = device_detail.device_database_id { // Device is in database, update the state. Start a transaction to roll back if needed. // transaction rolls back by default. 
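// NOTE (editorial, not part of this patch): the Transaction used just below comes from the
// pre-0.17 rust-postgres API already used throughout this file. As the comment above says,
// such a transaction rolls back by default when it goes out of scope unless it is explicitly
// marked for commit, which is why the error paths in save_state only call set_rollback()
// (or simply return) and never have to undo the UPDATE by hand; only the success path
// elsewhere in this function is expected to mark the transaction for commit.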
let transaction = conn.transaction()?; - let stmt = format!( - "UPDATE hardware SET state = '{}' WHERE device_id={}", - state, dev_id - ); + let stmt = format!("UPDATE hardware SET state = '{}' WHERE device_id={}", state, dev_id); let stmt_query = transaction.execute(&stmt, &[])?; - info!( - "Updated {} rows in database with state information", - stmt_query - ); + info!("Updated {} rows in database with state information", stmt_query); if stmt_query != 1 { // Only one device should be updated. Rollback transaction.set_rollback(); @@ -928,10 +888,7 @@ pub fn save_smart_result( smart_passed, dev_id ); let stmt_query = transaction.execute(&stmt, &[])?; - info!( - "Updated {} rows in database with smart check result", - stmt_query - ); + info!("Updated {} rows in database with smart check result", stmt_query); if stmt_query != 1 { // Only one device should be updated. Rollback transaction.set_rollback(); @@ -993,10 +950,8 @@ pub fn get_state( match device_detail.device_database_id { Some(dev_id) => { let dev_id = dev_id as i32; - let stmt_query = conn.query( - "SELECT state FROM hardware WHERE device_id = $1", - &[&dev_id], - )?; + let stmt_query = + conn.query("SELECT state FROM hardware WHERE device_id = $1", &[&dev_id])?; if stmt_query.len() != 1 || stmt_query.is_empty() { // Database doesn't know about the device. Must be new disk. Ok(State::Unscanned) @@ -1030,10 +985,7 @@ pub fn get_smart_result( let conn = get_connection_from_pool(pool)?; if let Some(dev_id) = device_detail.device_database_id { - let stmt = format!( - "SELECT smart_passed FROM hardware WHERE device_id = {}", - dev_id - ); + let stmt = format!("SELECT smart_passed FROM hardware WHERE device_id = {}", dev_id); let stmt_query = conn.query(&stmt, &[])?; if stmt_query.len() != 1 || stmt_query.is_empty() { // Query didn't return anything. Assume smart checks have not been done/passed @@ -1054,11 +1006,7 @@ pub fn get_smart_result( } fn row_to_ticket(row: &Row<'_>) -> DiskRepairTicket { - DiskRepairTicket { - ticket_id: row.get(0), - device_name: row.get(1), - device_path: row.get(2), - } + DiskRepairTicket { ticket_id: row.get(0), device_name: row.get(1), device_path: row.get(2) } } /// Get a list of ticket IDs (JIRA/other ids) that belong to me. @@ -1079,17 +1027,14 @@ pub fn get_outstanding_repair_tickets( tracking_id IS NOT NULL ORDER BY operations.start_time"; let detail_id = storage_detail_id as i32; - let stmt_query = conn.query( - &stmt, - &[ - &OperationStatus::InProgress.to_string(), - &OperationStatus::Pending.to_string(), - &OperationType::WaitingForReplacement.to_string(), - &State::WaitingForReplacement.to_string(), - &State::Good.to_string(), - &detail_id, - ], - )?; + let stmt_query = conn.query(&stmt, &[ + &OperationStatus::InProgress.to_string(), + &OperationStatus::Pending.to_string(), + &OperationType::WaitingForReplacement.to_string(), + &State::WaitingForReplacement.to_string(), + &State::Good.to_string(), + &detail_id, + ])?; let mut tickets: Vec = Vec::new(); if stmt_query.is_empty() { debug!( @@ -1124,10 +1069,7 @@ pub fn resolve_ticket_in_db(pool: &Pool, ticket_id: &str) -> ticket_id ); let stmt_query = conn.execute(&stmt, &[])?; - info!( - "Updated {} rows in database. Ticket {} marked as complete.", - stmt_query, ticket_id - ); + info!("Updated {} rows in database. 
Ticket {} marked as complete.", stmt_query, ticket_id); Ok(()) } @@ -1255,16 +1197,13 @@ pub fn get_all_pending_tickets( type_id = (SELECT type_id FROM operation_types WHERE op_name= $3) AND hardware.state in ($4, $5) AND tracking_id IS NOT NULL ORDER BY operations.start_time"; - let stmt_query = conn.query( - &stmt, - &[ - &OperationStatus::InProgress.to_string(), - &OperationStatus::Pending.to_string(), - &OperationType::WaitingForReplacement.to_string(), - &State::WaitingForReplacement.to_string(), - &State::Good.to_string(), - ], - )?; + let stmt_query = conn.query(&stmt, &[ + &OperationStatus::InProgress.to_string(), + &OperationStatus::Pending.to_string(), + &OperationType::WaitingForReplacement.to_string(), + &State::WaitingForReplacement.to_string(), + &State::Good.to_string(), + ])?; if stmt_query.is_empty() { debug!("No pending tickets for any host "); @@ -1273,12 +1212,7 @@ pub fn get_all_pending_tickets( let mut tickets: Vec = Vec::with_capacity(stmt_query.len()); debug!("{} pending tickets for all hosts ", stmt_query.len()); for row in stmt_query.iter() { - tickets.push(DiskPendingTicket::new( - row.get(0), - row.get(1), - row.get(2), - row.get(3), - )); + tickets.push(DiskPendingTicket::new(row.get(0), row.get(1), row.get(2), row.get(3))); } Ok(tickets) } diff --git a/src/lib/host_information.rs b/src/lib/host_information.rs index 9b165cd..367865d 100644 --- a/src/lib/host_information.rs +++ b/src/lib/host_information.rs @@ -39,10 +39,7 @@ impl Host { let region = get_region_from_hostname(&hostname)?; let storage_type = get_storage_type()?; - debug!( - "ip {}, region {}, storage_type {}", - ip, region, storage_type - ); + debug!("ip {}, region {}, storage_type {}", ip, region, storage_type); let server_type = server_type()?; let serial_number = server_serial()?; debug!("Gathering raid info"); @@ -170,9 +167,7 @@ fn server_type() -> BynarResult { let buff = read_to_string(path)?; return Ok(buff.trim().into()); } - Err(BynarError::from( - "/sys/class/dmi/id/product_name does not exist", - )) + Err(BynarError::from("/sys/class/dmi/id/product_name does not exist")) } fn server_serial() -> BynarResult { diff --git a/src/lib/lib.rs b/src/lib/lib.rs index cb4f8a1..0f24b5c 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -41,15 +41,8 @@ pub fn connect(host: &str, port: &str, server_publickey: &[u8]) -> BynarResult, operation_id: Option) -> DiskOp { - DiskOp { - op_type: op.get_Op_type(), - description, - operation_id, - ret_val: None, - } + DiskOp { op_type: op.get_Op_type(), description, operation_id, ret_val: None } } } @@ -130,9 +125,7 @@ fn create_msg_map( partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { disk_map.insert(partition.to_path_buf(), None); @@ -168,10 +161,7 @@ fn get_request_keys(dev_path: &PathBuf) -> BynarResult<(PathBuf, &PathBuf)> { Ok((dev_path.to_path_buf(), dev_path)) // disk...probably } else { // path just doesn't exist, so error... 
- error!( - "Path {} does not exist, nor does its parent.", - dev_path.display() - ); + error!("Path {} does not exist, nor does its parent.", dev_path.display()); return Err(BynarError::from(format!( "Path {} does not exist, nor does its parent.", dev_path.display() @@ -196,10 +186,7 @@ fn add_or_update_map_op( } if &parent == dev_path { // if exists Some(disk) then dev_path should also exist (since creation) of entry in map requires it - error!( - "Map is missing the disk entry but disk {} exists in the map", - parent.display() - ); + error!("Map is missing the disk entry but disk {} exists in the map", parent.display()); return Err(BynarError::from(format!( "Map is missing the disk entry but disk {} exists in the map", parent.display() @@ -215,9 +202,7 @@ fn add_or_update_map_op( partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&parent.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&parent.to_string_lossy().to_string()) }) .for_each(|partition| { disk_map.insert(partition.to_path_buf(), None); @@ -261,10 +246,7 @@ fn remove_map_op( return Ok(partition.clone()); } } - Err(BynarError::from(format!( - "Path {} is not in the message map", - dev_path.display() - ))) + Err(BynarError::from(format!("Path {} is not in the message map", dev_path.display()))) } // get the hashmap associated with a diskpath from the op map @@ -277,26 +259,15 @@ fn get_disk_map_op( if let Some(disk) = message_map.get(&parent) { return Ok(disk.clone()); } - Err(BynarError::from(format!( - "Path {} is not a disk in the map", - dev_path.display() - ))) + Err(BynarError::from(format!("Path {} is not a disk in the map", dev_path.display()))) } fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { let c = config.clone(); - let slack = Slack::new( - c.slack_webhook - .expect("slack webhook option is None") - .as_ref(), - )?; + let slack = Slack::new(c.slack_webhook.expect("slack webhook option is None").as_ref())?; let slack_channel = c.slack_channel.unwrap_or_else(|| "".to_string()); let bot_name = c.slack_botname.unwrap_or_else(|| "".to_string()); - let p = PayloadBuilder::new() - .text(msg) - .channel(slack_channel) - .username(bot_name) - .build()?; + let p = PayloadBuilder::new().text(msg).channel(slack_channel).username(bot_name).build()?; let res = slack.send(&p); match res { @@ -311,23 +282,13 @@ fn get_public_key(config: &ConfigSettings, host_info: &Host) -> BynarResult> = - test_disk::check_all_disks(&host_info, pool, host_mapping)? 
- .into_iter() - .collect(); + test_disk::check_all_disks(&host_info, pool, host_mapping)?.into_iter().collect(); // separate the states into Ok and Errors let usable_states: Vec = match all_states { Ok(s) => s, Err(e) => { error!("check_all_disks failed with error: {:?}", e); - return Err(BynarError::new(format!( - "check_all_disks failed with error: {:?}", - e - ))); + return Err(BynarError::new(format!("check_all_disks failed with error: {:?}", e))); } }; //filter all the disks that are in the WaitingForReplacement state and are not currently undergoing an operation @@ -468,10 +422,7 @@ fn check_for_failed_disks( //combine with replacing, then do sort_unstable_by and dedup_rm replacing.append(&mut add_replacing); replacing.sort_unstable_by(|a, b| { - a.block_device - .dev_path - .partial_cmp(&b.block_device.dev_path) - .unwrap() + a.block_device.dev_path.partial_cmp(&b.block_device.dev_path).unwrap() }); replacing.dedup_by(|a, b| a.block_device.dev_path.eq(&b.block_device.dev_path)); //filter Fail disks in seperate vec and soft-error those at the end before checking the errored_states @@ -484,11 +435,7 @@ fn check_for_failed_disks( // add safeToRemove + Remove request to message_queue, checking if its already in first // create Operation, description, and get the op_id let mut desc = description.clone(); - add_disk_to_description( - &mut desc, - &state_machine.block_device.dev_path, - &state_machine, - ); + add_disk_to_description(&mut desc, &state_machine.block_device.dev_path, &state_machine); let op_id = match state_machine.block_device.operation_id { None => { error!( @@ -504,10 +451,8 @@ fn check_for_failed_disks( format!("{}", state_machine.block_device.dev_path.display()) ); let mess: (Operation, Option, Option) = (op, Some(desc.clone()), Some(op_id)); - let op2 = helpers::make_op!( - Remove, - format!("{}", state_machine.block_device.dev_path.display()) - ); + let op2 = + helpers::make_op!(Remove, format!("{}", state_machine.block_device.dev_path.display())); let mess2: (Operation, Option, Option) = (op2, Some(desc), Some(op_id)); if !message_queue.contains(&mess) && !message_queue.contains(&mess2) { message_queue.push_back(mess); @@ -658,10 +603,7 @@ fn check_for_failed_disks( }; }*/ failed.iter().for_each(|state_machine| { - error!( - "Disk {} ended in a Fail state", - state_machine.block_device.dev_path.display() - ) + error!("Disk {} ended in a Fail state", state_machine.block_device.dev_path.display()) }); Ok(()) } @@ -677,9 +619,7 @@ fn evaluate( match e { // This is the error we're after BynarError::HardwareError(HardwareError { - ref name, - ref serial_number, - .. + ref name, ref serial_number, .. }) => { let serial = serial_number.as_ref().map(|s| &**s); let in_progress = in_progress::is_hardware_waiting_repair( @@ -831,10 +771,7 @@ fn add_repaired_disks( } Ok(false) => {} Err(e) => { - error!( - "Error getting resolved ticket status for {}. {:?}", - &ticket.ticket_id, e - ); + error!("Error getting resolved ticket status for {}. 
{:?}", &ticket.ticket_id, e); } }; } @@ -934,10 +871,7 @@ fn handle_operation_result( return Ok(()); } } - error!( - "Unable to get current operation in the map for {}", - path.display() - ); + error!("Unable to get current operation in the map for {}", path.display()); Err(BynarError::from(format!( "Unable to get current operation in the map for {}", path.display() @@ -1324,18 +1258,8 @@ fn main() { .long("simulate") .required(false), ) - .arg( - Arg::with_name("v") - .short("v") - .multiple(true) - .help("Sets the level of verbosity"), - ) - .arg( - Arg::with_name("daemon") - .help("Run Bynar as a daemon") - .long("daemon") - .required(false), - ) + .arg(Arg::with_name("v").short("v").multiple(true).help("Sets the level of verbosity")) + .arg(Arg::with_name("daemon").help("Run Bynar as a daemon").long("daemon").required(false)) .arg( Arg::with_name("time") .help("Time in seconds between Bynar runs") @@ -1366,16 +1290,9 @@ fn main() { )); let config_dir = Path::new(matches.value_of("configdir").unwrap()); if !config_dir.exists() { - warn!( - "Config directory {} doesn't exist. Creating", - config_dir.display() - ); + warn!("Config directory {} doesn't exist. Creating", config_dir.display()); if let Err(e) = create_dir(config_dir) { - error!( - "Unable to create directory {}: {}", - config_dir.display(), - e.to_string() - ); + error!("Unable to create directory {}: {}", config_dir.display(), e.to_string()); return; } } @@ -1484,17 +1401,15 @@ fn main() { } }; let public_key = get_public_key(&config, &host_info).unwrap(); - let s = match helpers::connect( - &config.manager_host, - &config.manager_port.to_string(), - &public_key, - ) { - Ok(s) => s, - Err(e) => { - error!("Error connecting to socket: {:?}", e); - return; - } - }; + let s = + match helpers::connect(&config.manager_host, &config.manager_port.to_string(), &public_key) + { + Ok(s) => s, + Err(e) => { + error!("Error connecting to socket: {:?}", e); + return; + } + }; let client_id: Vec = s.get_identity().unwrap(); debug!("Client ID {:?}, len {}", client_id, client_id.len()); let dur = Duration::from_secs(time); @@ -1620,11 +1535,8 @@ fn main() { debug!("Request Map after looping {:?}", message_map); } debug!("Bynar exited successfully"); - notify_slack( - &config, - &format!("Bynar on host {} has stopped", host_info.hostname), - ) - .expect("Unable to connect to slack"); + notify_slack(&config, &format!("Bynar on host {} has stopped", host_info.hostname)) + .expect("Unable to connect to slack"); } #[cfg(test)] @@ -1706,9 +1618,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { disk_map.insert(partition.to_path_buf(), None); @@ -1761,9 +1671,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { disk_map.insert(partition.to_path_buf(), None); @@ -1775,12 +1683,9 @@ mod tests { // check that for every device in devices, there is a hashmap // in the map with the device and all its partitions - let sda_map = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - ] - .to_vec(); + let sda_map = + [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")] + .to_vec(); let sdb_map = 
[PathBuf::from("/dev/sdb")].to_vec(); let sdc_map = [PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1")].to_vec(); let sdd_map = [ @@ -1833,12 +1738,9 @@ mod tests { // this is testing the expected behavior of parts inside the function assuming certain call result fn test_create_msg_map_with_db() { // since we want to test specifically the partitions we need an explicit device list - let mut devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); + let mut devices: Vec = + [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdb"), PathBuf::from("/dev/sdd")] + .to_vec(); println!("List of devices: \n{:#?}", devices); let mut map: HashMap>> = HashMap::new(); let db_devices: Vec = [ @@ -1886,9 +1788,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { disk_map.insert(partition.to_path_buf(), None); @@ -1900,12 +1800,9 @@ mod tests { // check that for every device in devices, there is a hashmap // in the map with the device and all its partitions - let sda_map = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - ] - .to_vec(); + let sda_map = + [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")] + .to_vec(); let sdb_map = [PathBuf::from("/dev/sdb")].to_vec(); let sdc_map = [PathBuf::from("/dev/sdc"), PathBuf::from("/dev/sdc1")].to_vec(); let sdd_map = [ @@ -2001,13 +1898,7 @@ mod tests { let parent = PathBuf::from("/dev/sda"); assert!( - map.get(&parent) - .unwrap() - .get(&insert_path) - .unwrap() - .as_ref() - .unwrap() - .op_type + map.get(&parent).unwrap().get(&insert_path).unwrap().as_ref().unwrap().op_type == Op::Remove ); let disk = map.get_mut(&parent).unwrap(); // we know map should have this @@ -2019,13 +1910,7 @@ mod tests { } println!("New Map: \n{:#?}", map); assert!( - map.get(&parent) - .unwrap() - .get(&insert_path) - .unwrap() - .as_ref() - .unwrap() - .op_type + map.get(&parent).unwrap().get(&insert_path).unwrap().as_ref().unwrap().op_type == Op::Add ); } @@ -2061,11 +1946,7 @@ mod tests { println!("New Map: \n{:#?}", map); assert!(map.get(&parent).is_some()); assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); - assert!(map - .get(&parent) - .unwrap() - .get(&PathBuf::from("/dev/sdb2")) - .is_some()); + assert!(map.get(&parent).unwrap().get(&PathBuf::from("/dev/sdb2")).is_some()); } #[test] @@ -2111,13 +1992,7 @@ mod tests { let parent = PathBuf::from("/dev/sda"); assert!( - map.get(&parent) - .unwrap() - .get(&insert_path) - .unwrap() - .as_ref() - .unwrap() - .op_type + map.get(&parent).unwrap().get(&insert_path).unwrap().as_ref().unwrap().op_type == Op::Remove ); let disk = map.get_mut(&parent).unwrap(); // we know map should have this @@ -2129,13 +2004,7 @@ mod tests { } println!("New Map: \n{:#?}", map); assert!( - map.get(&parent) - .unwrap() - .get(&insert_path) - .unwrap() - .as_ref() - .unwrap() - .op_type + map.get(&parent).unwrap().get(&insert_path).unwrap().as_ref().unwrap().op_type == Op::Add ); } @@ -2171,11 +2040,7 @@ mod tests { println!("New Map: \n{:#?}", map); assert!(map.get(&parent).is_some()); assert!(map.get(&parent).unwrap().get(&insert_path).is_some()); - assert!(map - .get(&parent) - .unwrap() - .get(&PathBuf::from("/dev/sdb")) - .is_some()); + 
assert!(map.get(&parent).unwrap().get(&PathBuf::from("/dev/sdb")).is_some()); } #[test] @@ -2215,12 +2080,8 @@ mod tests { map.insert(parent.to_path_buf(), disk_map); println!("Map: \n{:#?}", map); - assert!(get_map_op(&map, &PathBuf::from("/dev/sda")) - .unwrap() - .is_none()); - assert!(get_map_op(&map, &PathBuf::from("/dev/sda1")) - .unwrap() - .is_some()); + assert!(get_map_op(&map, &PathBuf::from("/dev/sda")).unwrap().is_none()); + assert!(get_map_op(&map, &PathBuf::from("/dev/sda1")).unwrap().is_some()); } #[test] @@ -2241,19 +2102,9 @@ mod tests { map.insert(parent.to_path_buf(), disk_map); println!("Map: \n{:#?}", map); - assert!(map - .get(&parent) - .unwrap() - .get(&insert_path) - .unwrap() - .is_some()); + assert!(map.get(&parent).unwrap().get(&insert_path).unwrap().is_some()); remove_map_op(&mut map, &insert_path); - assert!(map - .get(&parent) - .unwrap() - .get(&insert_path) - .unwrap() - .is_none()); + assert!(map.get(&parent).unwrap().get(&insert_path).unwrap().is_none()); println!("After Removal: \n{:#?}", map); } @@ -2312,9 +2163,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { disk_map.insert(partition.to_path_buf(), None); @@ -2357,10 +2206,7 @@ mod tests { .collect(); println!("Replacing: {:#?}", replacing); - assert_eq!( - replacing, - [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec() - ); + assert_eq!(replacing, [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec()); } #[test] // check filter disks that are Waiting for Replacement with map having Add @@ -2393,9 +2239,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { let op = Operation::new(); @@ -2480,9 +2324,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { let mut op = Operation::new(); @@ -2569,9 +2411,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { disk_map.insert(partition.to_path_buf(), None); @@ -2663,9 +2503,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { let op = Operation::new(); @@ -2765,9 +2603,7 @@ mod tests { partitions .iter() .filter(|partition| { - partition - .to_string_lossy() - .contains(&device.to_string_lossy().to_string()) + partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) }) .for_each(|partition| { let mut op = Operation::new(); @@ -2830,4 +2666,32 @@ mod tests { assert!(add_replacing.contains(&path)); }); } + + #[test] + // remove all duplicates from the replacing vector + fn test_remove_duplicates() { + let mut replacing = [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec(); + let mut paths = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda2"), + 
PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + ] + .to_vec(); + replacing.append(&mut paths); + println!("Appended: {:#?}", replacing); + replacing.sort_unstable_by(|a, b| a.partial_cmp(&b.to_path_buf()).unwrap()); + replacing.dedup_by(|a, b| a == b); + let compare = [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdc1"), + ] + .to_vec(); + println!("Sorted and unique: {:#?}", replacing); + assert_eq!(compare, replacing); + } } diff --git a/src/test_disk.rs b/src/test_disk.rs index c20988c..c4c4bc2 100644 --- a/src/test_disk.rs +++ b/src/test_disk.rs @@ -107,8 +107,7 @@ mod tests { debug!("writing 25MB to {}", file_path.display()); let buff = [0x00; 1024]; for _ in 0..25600 { - f.write(&buff) - .expect("Failed to write to loop backing file"); + f.write(&buff).expect("Failed to write to loop backing file"); } f.sync_all().unwrap(); @@ -121,25 +120,16 @@ mod tests { // Put an xfs filesystem down on it debug!("Putting xfs on to {}", free_device); - Command::new("mkfs.xfs") - .args(&[free_device]) - .status() - .unwrap(); + Command::new("mkfs.xfs").args(&[free_device]).status().unwrap(); PathBuf::from(free_device) } fn cleanup_loop_device(p: &Path) { // Cleanup - Command::new("umount") - .args(&[&p.to_string_lossy().into_owned()]) - .status() - .unwrap(); + Command::new("umount").args(&[&p.to_string_lossy().into_owned()]).status().unwrap(); - Command::new("losetup") - .args(&["-d", &p.to_string_lossy()]) - .status() - .unwrap(); + Command::new("losetup").args(&["-d", &p.to_string_lossy()]).status().unwrap(); } #[test] @@ -415,10 +405,7 @@ impl Transition for CheckForCorruption { _scsi_info: &Option<(ScsiInfo, Option)>, simulate: bool, ) -> State { - debug!( - "thread {} running CheckForCorruption transition", - process::id() - ); + debug!("thread {} running CheckForCorruption transition", process::id()); if !simulate { // keep ref to mountpoint. check if filesystem unmounted (if not unmount first) // After running check remount filesystem if unmounted @@ -485,10 +472,7 @@ impl Transition for CheckWearLeveling { _scsi_info: &Option<(ScsiInfo, Option)>, _simulate: bool, ) -> State { - debug!( - "thread {} running CheckWearLeveling transition", - process::id() - ); + debug!("thread {} running CheckWearLeveling transition", process::id()); //TODO: How can we check wear leveling? 
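        // NOTE (editorial sketch, not in the original patch): one way to answer the TODO above
        // would be to read the drive's SMART attribute table through smartctl, which this module
        // already shells out to in run_smartctl_check. Attribute names such as
        // "Wear_Leveling_Count" and "Percent_Lifetime_Remain" are vendor specific, so treat this
        // helper purely as an illustration of the approach, not as Bynar's implementation.
        fn wear_leveling_exhausted(device: &Path) -> BynarResult<bool> {
            // Dump the vendor attribute table: `smartctl -A <device>`
            let out = Command::new("smartctl").args(&["-A", &device.to_string_lossy()]).output()?;
            let table = String::from_utf8_lossy(&out.stdout);
            // Consider the drive worn out only if a wear attribute is present and its
            // normalized value has dropped to zero.
            let exhausted = table
                .lines()
                .filter(|line| {
                    line.contains("Wear_Leveling_Count")
                        || line.contains("Percent_Lifetime_Remain")
                })
                .any(|line| line.split_whitespace().nth(3) == Some("000"));
            Ok(exhausted)
        }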
to_state @@ -512,12 +496,7 @@ impl Transition for Eval { false } }; - debug!( - "thread {} {} blank {}", - process::id(), - device.dev_path.display(), - blank - ); + debug!("thread {} {} blank {}", process::id(), device.dev_path.display(), blank); if blank { debug!("thread {} Assuming blank disk is good", process::id()); return to_state; @@ -539,11 +518,7 @@ impl Transition for Eval { if device.mount_point.is_none() { debug!("Try mounting in EVAL"); - debug!( - "thread {} Mounting device: {}", - process::id(), - device.dev_path.display() - ); + debug!("thread {} Mounting device: {}", process::id(), device.dev_path.display()); let mnt_dir = match TempDir::new("bynar") { Ok(d) => d, Err(e) => { @@ -559,10 +534,7 @@ impl Transition for Eval { device.mount_point = Some(mnt_dir.into_path()); } debug!("thread {} Checking if mount is writable", process::id()); - let mnt = &device - .mount_point - .as_ref() - .expect("drive.mount_point is None but it cannot be"); + let mnt = &device.mount_point.as_ref().expect("drive.mount_point is None but it cannot be"); match check_writable(&mnt) { // Mount point is writeable, smart passed. Good to go Ok(_) => { @@ -606,10 +578,7 @@ impl Transition for MarkForReplacement { _scsi_info: &Option<(ScsiInfo, Option)>, _simulate: bool, ) -> State { - debug!( - "thread {} running MarkForReplacement transition", - process::id() - ); + debug!("thread {} running MarkForReplacement transition", process::id()); to_state } } @@ -621,11 +590,7 @@ impl Transition for Mount { _scsi_info: &Option<(ScsiInfo, Option)>, _simulate: bool, ) -> State { - debug!( - "thread {} Mounting device: {}", - process::id(), - device.dev_path.display() - ); + debug!("thread {} Mounting device: {}", process::id(), device.dev_path.display()); let mnt_dir = match TempDir::new("bynar") { Ok(d) => d, Err(e) => { @@ -674,9 +639,7 @@ impl Transition for Reformat { // We need to update the UUID of the block device now. let blkid = BlkId::new(&device.dev_path).expect("blkid creation failed"); blkid.do_probe().expect("blkid probe failed"); - let drive_uuid = blkid - .lookup_value("UUID") - .expect("blkid lookup uuid failed"); + let drive_uuid = blkid.lookup_value("UUID").expect("blkid lookup uuid failed"); debug!( "thread {} drive_uuid: {}", process::id(), @@ -755,11 +718,7 @@ impl Transition for Replace { to_state } Err(e) => { - error!( - "Unable to find device: {}. {:?}", - device.dev_path.display(), - e - ); + error!("Unable to find device: {}. 
{:?}", device.dev_path.display(), e); State::Fail } } @@ -800,12 +759,7 @@ impl Transition for Scan { }, (_, Vendor::Hp) => { // is_raid_backed unpacks the Option so this should be safe - match &scsi_info - .as_ref() - .expect("scsi_info is None but cannot be") - .0 - .state - { + match &scsi_info.as_ref().expect("scsi_info is None but cannot be").0.state { Some(state) => { debug!("thread {} scsi device state: {}", process::id(), state); if *state == DeviceState::Running { @@ -877,8 +831,7 @@ impl StateMachine { // Just for debugging dot graph creation transition_label: &str, ) { - self.dot_graph - .push((from_state, to_state, transition_label.to_string())); + self.dot_graph.push((from_state, to_state, transition_label.to_string())); self.graph.add_edge(from_state, to_state, callback); } @@ -903,12 +856,7 @@ impl StateMachine { // If the state transition returns State::Fail try the next path let beginning_state = self.block_device.state; for e in edges { - debug!( - "thread {} Attempting {} to {} transition", - process::id(), - &e.0, - &e.1 - ); + debug!("thread {} Attempting {} to {} transition", process::id(), &e.0, &e.1); let state = e.2(e.1, &mut self.block_device, &self.scsi_info, self.simulate); match state { State::Fail => { @@ -920,10 +868,7 @@ impl StateMachine { } State::WaitingForReplacement => { // TODO: Is this the only state we shouldn't advance further from? - debug!( - "thread {} state==State::WaitingForReplacement", - process::id() - ); + debug!("thread {} state==State::WaitingForReplacement", process::id()); self.block_device.state = state; break 'outer; } @@ -986,18 +931,8 @@ impl StateMachine { // states are ordered from most to least ideal outcome. self.add_transition(State::Unscanned, State::Scanned, Scan::transition, "Scan"); self.add_transition(State::Unscanned, State::Fail, Scan::transition, "Scan"); - self.add_transition( - State::NotMounted, - State::Mounted, - Mount::transition, - "Mount", - ); - self.add_transition( - State::NotMounted, - State::MountFailed, - Mount::transition, - "Mount", - ); + self.add_transition(State::NotMounted, State::Mounted, Mount::transition, "Mount"); + self.add_transition(State::NotMounted, State::MountFailed, Mount::transition, "Mount"); self.add_transition( State::MountFailed, State::Corrupt, @@ -1016,18 +951,8 @@ impl StateMachine { ); self.add_transition(State::Mounted, State::Scanned, NoOp::transition, "NoOp"); - self.add_transition( - State::ReadOnly, - State::Mounted, - Remount::transition, - "Remount", - ); - self.add_transition( - State::ReadOnly, - State::MountFailed, - Remount::transition, - "Remount", - ); + self.add_transition(State::ReadOnly, State::Mounted, Remount::transition, "Remount"); + self.add_transition(State::ReadOnly, State::MountFailed, Remount::transition, "Remount"); self.add_transition( State::Corrupt, @@ -1035,12 +960,7 @@ impl StateMachine { AttemptRepair::transition, "AttemptRepair", ); - self.add_transition( - State::Corrupt, - State::RepairFailed, - NoOp::transition, - "NoOp", - ); + self.add_transition(State::Corrupt, State::RepairFailed, NoOp::transition, "NoOp"); self.add_transition( State::RepairFailed, @@ -1048,12 +968,7 @@ impl StateMachine { Reformat::transition, "Reformat", ); - self.add_transition( - State::RepairFailed, - State::ReformatFailed, - NoOp::transition, - "NoOp", - ); + self.add_transition(State::RepairFailed, State::ReformatFailed, NoOp::transition, "NoOp"); self.add_transition( State::ReformatFailed, @@ -1062,12 +977,7 @@ impl StateMachine { "NoOp", ); - self.add_transition( - 
State::Reformatted, - State::Unscanned, - NoOp::transition, - "NoOp", - ); + self.add_transition(State::Reformatted, State::Unscanned, NoOp::transition, "NoOp"); self.add_transition( State::WornOut, @@ -1330,11 +1240,7 @@ fn add_previous_devices( &device_name, None, )?; - debug!( - "{} awaiting repair: {}", - device_path.display(), - awaiting_repair - ); + debug!("{} awaiting repair: {}", device_path.display(), awaiting_repair); // So this never trips because the database thinks this disk is still good if !awaiting_repair { let b = BlockDevice { @@ -1504,18 +1410,11 @@ fn repair_filesystem(filesystem_type: &FilesystemType, device: &Path) -> BynarRe #[cfg_attr(test, mockable)] fn check_writable(path: &Path) -> BynarResult<()> { - debug!( - "thread {} Checking if {:?} is writable", - process::id(), - path - ); + debug!("thread {} Checking if {:?} is writable", process::id(), path); let temp_path = TempDir::new_in(path, "bynar")?; let file_path = temp_path.path().join("write_test"); debug!("thread {} Creating: {}", process::id(), file_path.display()); - let mut file = OpenOptions::new() - .write(true) - .create_new(true) - .open(file_path)?; + let mut file = OpenOptions::new().write(true).create_new(true).open(file_path)?; file.write_all(b"Hello, world!")?; Ok(()) } @@ -1533,11 +1432,7 @@ fn check_lvm(device: &Path) -> BynarResult { for v in vol_names { let vg = lvm.vg_open(&v, &OpenMode::Read)?; let physical_vols = vg.list_pvs()?; - trace!( - "thread {} lvm physical volumes: {:?}", - process::id(), - physical_vols - ); + trace!("thread {} lvm physical volumes: {:?}", process::id(), physical_vols); for p in physical_vols { trace!("thread {} physical volume: {}", process::id(), p.get_name()); if device == Path::new(&p.get_name()) { @@ -1552,21 +1447,14 @@ fn check_lvm(device: &Path) -> BynarResult { fn check_xfs(device: &Path) -> BynarResult { //Any output that is produced when xfs_check is not run in verbose mode //indicates that the filesystem has an inconsistency. - debug!( - "thread {} Running xfs_repair -n to check for corruption", - process::id() - ); - let status = Command::new("xfs_repair") - .args(&vec!["-n", &device.to_string_lossy()]) - .status()?; + debug!("thread {} Running xfs_repair -n to check for corruption", process::id()); + let status = + Command::new("xfs_repair").args(&vec!["-n", &device.to_string_lossy()]).status()?; match status.code() { Some(code) => match code { 0 => Ok(Fsck::Ok), 1 => Ok(Fsck::Corrupt), - _ => Err(BynarError::new(format!( - "xfs_repair failed with code: {}", - code - ))), + _ => Err(BynarError::new(format!("xfs_repair failed with code: {}", code))), }, //Process terminated by signal None => Err(BynarError::from("xfs_repair terminated by signal")), @@ -1587,13 +1475,8 @@ fn repair_xfs(device: &Path) -> BynarResult<()> { } fn check_ext(device: &Path) -> BynarResult { - debug!( - "thread {} running e2fsck -n to check for errors", - process::id() - ); - let status = Command::new("e2fsck") - .args(&["-n", &device.to_string_lossy()]) - .status()?; + debug!("thread {} running e2fsck -n to check for errors", process::id()); + let status = Command::new("e2fsck").args(&["-n", &device.to_string_lossy()]).status()?; match status.code() { Some(code) => { match code { @@ -1601,10 +1484,7 @@ fn check_ext(device: &Path) -> BynarResult { 0 => Ok(Fsck::Ok), //4 - File system errors left uncorrected. 
This requires repair 4 => Ok(Fsck::Corrupt), - _ => Err(BynarError::new(format!( - "e2fsck returned error code: {}", - code - ))), + _ => Err(BynarError::new(format!("e2fsck returned error code: {}", code))), } } //Process terminated by signal @@ -1616,9 +1496,7 @@ fn repair_ext(device: &Path) -> BynarResult<()> { //Run a noninteractive fix. This will exit with return code 4 //if it needs human intervention. debug!("running e2fsck -p for noninteractive repair"); - let status = Command::new("e2fsck") - .args(&["-p", &device.to_string_lossy()]) - .status()?; + let status = Command::new("e2fsck").args(&["-p", &device.to_string_lossy()]).status()?; match status.code() { Some(code) => { match code { @@ -1629,10 +1507,7 @@ fn repair_ext(device: &Path) -> BynarResult<()> { //2 - File system errors corrected, system should //be rebooted 2 => Ok(()), - _ => Err(BynarError::new(format!( - "e2fsck returned error code: {}", - code - ))), + _ => Err(BynarError::new(format!("e2fsck returned error code: {}", code))), } } //Process terminated by signal @@ -1644,16 +1519,13 @@ fn repair_ext(device: &Path) -> BynarResult<()> { #[cfg_attr(test, mockable)] fn run_smartctl_check(device: &Path) -> BynarResult { // Enable Smart Scan - let out = Command::new("smartctl") - .args(&["-s", "on", &device.to_string_lossy()]) - .output()?; + let out = Command::new("smartctl").args(&["-s", "on", &device.to_string_lossy()]).output()?; let status = match out.status.code() { Some(code) => match code { // no errors, smart enabled 0 => { - let out = Command::new("smartctl") - .args(&["-H", &device.to_string_lossy()]) - .output()?; //Run overall health scan + let out = + Command::new("smartctl").args(&["-H", &device.to_string_lossy()]).output()?; //Run overall health scan match out.status.code() { Some(code) => match code { // no errors, health scan successful @@ -1730,11 +1602,7 @@ fn is_device_mounted(dev_path: &Path) -> bool { let partitions = match read_partitions(&dev_path, &disk_header, disk::DEFAULT_SECTOR_SIZE) { Ok(p) => p, Err(e) => { - warn!( - "thread {} Unable to read disk partitions: {}", - process::id(), - e - ); + warn!("thread {} Unable to read disk partitions: {}", process::id(), e); return false; } }; @@ -1742,17 +1610,9 @@ fn is_device_mounted(dev_path: &Path) -> bool { for p in partitions.iter().enumerate() { let tmp = format!("{name}{num}", name = dev_path.display(), num = p.0 + 1); let partition_path = Path::new(&tmp); - debug!( - "thread {} partition_path: {}", - process::id(), - partition_path.display() - ); + debug!("thread {} partition_path: {}", process::id(), partition_path.display()); if let Ok(Some(mount)) = block_utils::get_mountpoint(&partition_path) { - debug!( - "thread {} partition mount: {}", - process::id(), - mount.display() - ); + debug!("thread {} partition mount: {}", process::id(), mount.display()); return true; } } @@ -1774,11 +1634,7 @@ fn is_disk_blank(dev: &Path) -> BynarResult { for v in vol_names { let vg = lvm.vg_open(&v, &OpenMode::Read)?; let physical_vols = vg.list_pvs()?; - trace!( - "thread {} lvm physical volumes: {:?}", - process::id(), - physical_vols - ); + trace!("thread {} lvm physical volumes: {:?}", process::id(), physical_vols); for p in physical_vols { trace!("thread {} physical volume: {}", process::id(), p.get_name()); if dev == Path::new(&p.get_name()) { @@ -1788,10 +1644,7 @@ fn is_disk_blank(dev: &Path) -> BynarResult { } } - debug!( - "thread {} Attempting to read gpt disk header", - process::id() - ); + debug!("thread {} Attempting to read gpt disk 
header", process::id()); if read_header(&dev, disk::DEFAULT_SECTOR_SIZE).is_ok() { // We found a gpt header return Ok(false); @@ -1801,11 +1654,7 @@ fn is_disk_blank(dev: &Path) -> BynarResult { return Ok(false); } let device = get_device_info(dev)?; - debug!( - "thread {} Mounting device: {}", - process::id(), - dev.display() - ); + debug!("thread {} Mounting device: {}", process::id(), dev.display()); let mnt_dir = TempDir::new("bynar")?; match mount_device(&device, &mnt_dir.path()) { Ok(_) => { @@ -1813,12 +1662,7 @@ fn is_disk_blank(dev: &Path) -> BynarResult { Ok(false) } Err(e) => { - debug!( - "thread {} Mounting {} failed: {}", - process::id(), - dev.display(), - e - ); + debug!("thread {} Mounting {} failed: {}", process::id(), dev.display(), e); //If the partition is EMPTY, it should be mountable, which means if it ISN'T mountable its probably corrupt (and not blank) Ok(false) } diff --git a/src/test_hardware.rs b/src/test_hardware.rs index 4fd5d24..0af4fbc 100644 --- a/src/test_hardware.rs +++ b/src/test_hardware.rs @@ -63,14 +63,8 @@ fn collect_redfish_info(config: &ConfigSettings) -> BynarResult Date: Mon, 24 Feb 2020 10:24:35 -0500 Subject: [PATCH 49/76] squish tests down --- src/main.rs | 246 +++++++++++++++------------------------------------- 1 file changed, 69 insertions(+), 177 deletions(-) diff --git a/src/main.rs b/src/main.rs index ceb8f43..6426bf9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1544,8 +1544,29 @@ mod tests { use super::*; use block_utils::*; - // ------------------- Test create_msg_map ------------------ + // list of devices to use in some test functions + fn get_devices() -> Vec { + [ + PathBuf::from("/dev/sda"), + PathBuf::from("/dev/sdb"), + PathBuf::from("/dev/sdc"), + PathBuf::from("/dev/sdd"), + ] + .to_vec() + } + //list of partitions to use in some test functions + fn get_partitions() -> Vec { + [ + PathBuf::from("/dev/sda1"), + PathBuf::from("/dev/sda2"), + PathBuf::from("/dev/sdc1"), + PathBuf::from("/dev/sdd1"), + PathBuf::from("/dev/sdd2"), + PathBuf::from("/dev/sdd3"), + ] + .to_vec() + } #[test] // This tests the filter(s) used to get a list of devices fn test_filter_block_devices() { @@ -1644,24 +1665,10 @@ mod tests { // this is testing the expected behavior of parts inside the function assuming certain call result fn test_create_msg_map_with_partitions() { // since we want to test specifically the partitions we need an explicit device list - let devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdc"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); + let devices: Vec = get_devices(); println!("List of devices: \n{:#?}", devices); let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [ - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - PathBuf::from("/dev/sdc1"), - PathBuf::from("/dev/sdd1"), - PathBuf::from("/dev/sdd2"), - PathBuf::from("/dev/sdd3"), - ] - .to_vec(); + let partitions: Vec = get_partitions(); println!("List of partitions: \n{:#?}", partitions); devices.iter().for_each(|device| { // make a new hashmap @@ -1738,9 +1745,7 @@ mod tests { // this is testing the expected behavior of parts inside the function assuming certain call result fn test_create_msg_map_with_db() { // since we want to test specifically the partitions we need an explicit device list - let mut devices: Vec = - [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdb"), PathBuf::from("/dev/sdd")] - .to_vec(); + let mut devices: Vec = get_devices(); println!("List of devices: \n{:#?}", devices); 
let mut map: HashMap>> = HashMap::new(); let db_devices: Vec = [ @@ -1850,15 +1855,20 @@ mod tests { }); } - // ----------- Test the add_or_update_map_op function ------------- - #[test] - // test if, given a partition path that is not in the map (but the parent is) - // add the partition to the map with the given operation - fn test_add_or_update_map_op_partition_add() { + // create empty map with just /dev/sda for testing + fn empty_sda_map() -> HashMap>>{ let mut map: HashMap>> = HashMap::new(); let mut disk_map: HashMap> = HashMap::new(); disk_map.insert(PathBuf::from("/dev/sda"), None); map.insert(PathBuf::from("/dev/sda"), disk_map); + map + } + + #[test] + // test if, given a partition path that is not in the map (but the parent is) + // add the partition to the map with the given operation + fn test_add_or_update_map_op_partition_add() { + let mut map: HashMap>> = empty_sda_map(); println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sda1"); @@ -1885,8 +1895,7 @@ mod tests { let mut map: HashMap>> = HashMap::new(); let mut disk_map: HashMap> = HashMap::new(); disk_map.insert(PathBuf::from("/dev/sda"), None); - let mut op = Operation::new(); - op.set_Op_type(Op::Remove); + let mut op = helpers::make_op!(Remove); let disk_op = DiskOp::new(op, Some("test update".to_string()), Some(0)); disk_map.insert(PathBuf::from("/dev/sda1"), Some(disk_op)); map.insert(PathBuf::from("/dev/sda"), disk_map); @@ -1919,10 +1928,7 @@ mod tests { // test if, given a partition path that is not in the map and whose parent is not // in the map, insert the partition + parent disk into the map fn test_add_or_update_map_op_partition_insert() { - let mut map: HashMap>> = HashMap::new(); - let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(PathBuf::from("/dev/sda"), None); - map.insert(PathBuf::from("/dev/sda"), disk_map); + let mut map: HashMap>> = empty_sda_map(); println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sdb1"); @@ -1979,8 +1985,7 @@ mod tests { fn test_add_or_update_map_op_parent_update() { let mut map: HashMap>> = HashMap::new(); let mut disk_map: HashMap> = HashMap::new(); - let mut op = Operation::new(); - op.set_Op_type(Op::Remove); + let mut op = helpers::make_op!(Remove); let disk_op = DiskOp::new(op, Some("test update".to_string()), Some(0)); disk_map.insert(PathBuf::from("/dev/sda"), Some(disk_op)); map.insert(PathBuf::from("/dev/sda"), disk_map); @@ -2013,10 +2018,7 @@ mod tests { // test if, given a disk path that is not in the disk map nor the req map // create a new disk map with the disk path and insert into the req map fn test_add_or_update_map_op_parent_insert() { - let mut map: HashMap>> = HashMap::new(); - let mut disk_map: HashMap> = HashMap::new(); - disk_map.insert(PathBuf::from("/dev/sda"), None); - map.insert(PathBuf::from("/dev/sda"), disk_map); + let mut map: HashMap>> = empty_sda_map(); println!("Initial Map: \n{:#?}", map); let insert_path = PathBuf::from("/dev/sdb"); @@ -2134,27 +2136,11 @@ mod tests { assert!(req_disk_map.get(&parent).unwrap().is_none()); } - #[test] - // check filter disks that are Waiting for Replacement with map having None - // no in progress check since all paths should have None - fn test_get_replacing_vec_none() { - let devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdc"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); + // create empty map for testing with None values + fn create_none_map() -> HashMap>>{ + let devices: Vec = get_devices(); 
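        // NOTE (editorial): the nested map built by this helper mirrors the one produced by
        // create_msg_map: it is keyed by the parent disk path, and the inner map carries the
        // disk plus each of its partitions, each mapped to an Option<DiskOp> where None means
        // no request is currently in flight for that path.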
let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [ - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - PathBuf::from("/dev/sdc1"), - PathBuf::from("/dev/sdd1"), - PathBuf::from("/dev/sdd2"), - PathBuf::from("/dev/sdd3"), - ] - .to_vec(); + let partitions: Vec = get_partitions(); devices.iter().for_each(|device| { // make a new hashmap let mut disk_map: HashMap> = HashMap::new(); @@ -2170,6 +2156,13 @@ mod tests { }); map.insert(device.to_path_buf(), disk_map); }); + map + } + #[test] + // check filter disks that are Waiting for Replacement with map having None + // no in progress check since all paths should have None + fn test_get_replacing_vec_none() { + let mut map: HashMap>> = create_none_map(); println!("Initial Hashmap: \n{:#?}", map); let states: Vec = [ @@ -2208,27 +2201,12 @@ mod tests { println!("Replacing: {:#?}", replacing); assert_eq!(replacing, [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sdc1")].to_vec()); } - #[test] - // check filter disks that are Waiting for Replacement with map having Add - // in progress yes or no - fn test_get_replacing_vec_add() { - let devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdc"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); + + // create initial map for testing Add + fn create_add_map() -> HashMap>> { + let devices: Vec = get_devices(); let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [ - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - PathBuf::from("/dev/sdc1"), - PathBuf::from("/dev/sdd1"), - PathBuf::from("/dev/sdd2"), - PathBuf::from("/dev/sdd3"), - ] - .to_vec(); + let partitions: Vec = get_partitions(); devices.iter().for_each(|device| { // make a new hashmap let mut disk_map: HashMap> = HashMap::new(); @@ -2248,6 +2226,14 @@ mod tests { }); map.insert(device.to_path_buf(), disk_map); }); + map + } + + #[test] + // check filter disks that are Waiting for Replacement with map having Add + // in progress yes or no + fn test_get_replacing_vec_add() { + let mut map: HashMap>> = create_add_map(); println!("Initial Hashmap: \n{:#?}", map); let states: Vec = [ @@ -2294,23 +2280,9 @@ mod tests { #[test] // check filter disks that are Waiting for Replacement with map having SafeToRemove || Remove fn test_get_replacing_vec_exists() { - let devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdc"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); + let devices: Vec = get_devices(); let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [ - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - PathBuf::from("/dev/sdc1"), - PathBuf::from("/dev/sdd1"), - PathBuf::from("/dev/sdd2"), - PathBuf::from("/dev/sdd3"), - ] - .to_vec(); + let partitions: Vec = get_partitions(); devices.iter().for_each(|device| { // make a new hashmap let mut disk_map: HashMap> = HashMap::new(); @@ -2386,38 +2358,7 @@ mod tests { // map all nones fn test_add_related_paths_none() { // init the map - let devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdc"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); - let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [ - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - PathBuf::from("/dev/sdc1"), - PathBuf::from("/dev/sdd1"), - PathBuf::from("/dev/sdd2"), - PathBuf::from("/dev/sdd3"), - ] - .to_vec(); - devices.iter().for_each(|device| { - // make a new hashmap - let mut disk_map: HashMap> = 
HashMap::new(); - disk_map.insert(device.to_path_buf(), None); - // check if partition parent is device - partitions - .iter() - .filter(|partition| { - partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) - }) - .for_each(|partition| { - disk_map.insert(partition.to_path_buf(), None); - }); - map.insert(device.to_path_buf(), disk_map); - }); + let mut map: HashMap>> = create_none_map(); println!("Initial Hashmap: \n{:#?}", map); @@ -2476,42 +2417,7 @@ mod tests { // test adding related partitions/disks to list // map all Add fn test_add_related_paths_add() { - let devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdc"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); - let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [ - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - PathBuf::from("/dev/sdc1"), - PathBuf::from("/dev/sdd1"), - PathBuf::from("/dev/sdd2"), - PathBuf::from("/dev/sdd3"), - ] - .to_vec(); - devices.iter().for_each(|device| { - // make a new hashmap - let mut disk_map: HashMap> = HashMap::new(); - let op = Operation::new(); - let disk_op = DiskOp::new(op, None, None); - disk_map.insert(device.to_path_buf(), Some(disk_op)); - // check if partition parent is device - partitions - .iter() - .filter(|partition| { - partition.to_string_lossy().contains(&device.to_string_lossy().to_string()) - }) - .for_each(|partition| { - let op = Operation::new(); - let disk_op = DiskOp::new(op, None, None); - disk_map.insert(partition.to_path_buf(), Some(disk_op)); - }); - map.insert(device.to_path_buf(), disk_map); - }); + let mut map: HashMap>> = create_add_map(); println!("Initial Hashmap: \n{:#?}", map); let states: Vec = [ @@ -2573,23 +2479,9 @@ mod tests { // test adding related partitions/disks to list // map all SafeToRemove or Removes fn test_add_related_paths_empty() { - let devices: Vec = [ - PathBuf::from("/dev/sda"), - PathBuf::from("/dev/sdb"), - PathBuf::from("/dev/sdc"), - PathBuf::from("/dev/sdd"), - ] - .to_vec(); + let devices = get_devices(); let mut map: HashMap>> = HashMap::new(); - let partitions: Vec = [ - PathBuf::from("/dev/sda1"), - PathBuf::from("/dev/sda2"), - PathBuf::from("/dev/sdc1"), - PathBuf::from("/dev/sdd1"), - PathBuf::from("/dev/sdd2"), - PathBuf::from("/dev/sdd3"), - ] - .to_vec(); + let partitions: Vec = get_partitions(); devices.iter().for_each(|device| { // make a new hashmap let mut disk_map: HashMap> = HashMap::new(); From 0d2c8178d0061a71ee5d9f09f85fe8063afd1a6b Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 25 Feb 2020 14:49:24 -0500 Subject: [PATCH 50/76] Fix all finish check for SafeToRemove handling and add unit testing --- src/main.rs | 118 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 112 insertions(+), 6 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6426bf9..d4ddacb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -884,9 +884,14 @@ fn handle_operation_result( current_op.ret_val = Some(op_res); //push op back into map add_or_update_map_op(message_map, &dev_path, Some(current_op))?; - return Ok(()); + } else { + return Err(BynarError::from(format!( + "{} on host {} does not have a currently running operation!", + dev_path.display(), + host_info.hostname + ))); } - // check if allll the other paths in disk are SafeToRemove (and not Success) + // check if all the other paths in disk are SafeToRemove (and not Success) // check if all ops in the disk have finished let disk = get_disk_map_op(message_map, &dev_path)?; let 
mut all_finished = true; @@ -894,13 +899,17 @@ fn handle_operation_result( //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if ret.get_outcome() != OpOutcome::Success + if !(ret.get_outcome() != OpOutcome::Success && (ret.get_op_type() == Op::SafeToRemove - || ret.get_op_type() == Op::Remove) + || ret.get_op_type() == Op::Remove)) { all_finished = false; } + } else { + all_finished = false; } + } else { + all_finished = false; } }); // if so, notify slack @@ -1856,7 +1865,7 @@ mod tests { } // create empty map with just /dev/sda for testing - fn empty_sda_map() -> HashMap>>{ + fn empty_sda_map() -> HashMap>> { let mut map: HashMap>> = HashMap::new(); let mut disk_map: HashMap> = HashMap::new(); disk_map.insert(PathBuf::from("/dev/sda"), None); @@ -2137,7 +2146,7 @@ mod tests { } // create empty map for testing with None values - fn create_none_map() -> HashMap>>{ + fn create_none_map() -> HashMap>> { let devices: Vec = get_devices(); let mut map: HashMap>> = HashMap::new(); let partitions: Vec = get_partitions(); @@ -2586,4 +2595,101 @@ mod tests { println!("Sorted and unique: {:#?}", replacing); assert_eq!(compare, replacing); } + + #[test] + // test all finished check where disk_map is all finished and mixed Remove/SafeToRemove + fn test_all_finished_mixed() { + let mut disk_map: HashMap> = HashMap::new(); + + let disk_paths = + [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")] + .to_vec(); + disk_paths.iter().for_each(|path| { + let mut safe_to_rem = OpOutcomeResult::new(); + let mut op = Operation::new(); + safe_to_rem.set_outcome(OpOutcome::Skipped); + if path == &PathBuf::from("/dev/sda") { + safe_to_rem.set_op_type(Op::Remove); + op.set_Op_type(Op::Remove); + } else { + safe_to_rem.set_op_type(Op::SafeToRemove); + op.set_Op_type(Op::SafeToRemove); + } + let mut disk_op = DiskOp::new(op, None, None); + disk_op.ret_val = Some(safe_to_rem); + disk_map.insert(path.to_path_buf(), Some(disk_op)); + }); + println!("Initial Disk Map: {:#?}", disk_map); + + let mut all_finished = true; + disk_map.iter().for_each(|(k, v)| { + //check if value finished + if let Some(val) = v { + if let Some(ret) = &val.ret_val { + if !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove + || ret.get_op_type() == Op::Remove)) + { + all_finished = false; + } + } else { + all_finished = false; + } + } else { + all_finished = false; + } + }); + assert!(all_finished); + } + + #[test] + // test all finished check where disk_map is not finished + fn test_all_finished_mixed_fail() { + let mut disk_map: HashMap> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + + let disk_paths = + [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")] + .to_vec(); + disk_paths.iter().for_each(|path| { + let mut safe_to_rem = OpOutcomeResult::new(); + let mut op = Operation::new(); + if path == &PathBuf::from("/dev/sda2") { + safe_to_rem.set_outcome(OpOutcome::Success); + } else { + safe_to_rem.set_outcome(OpOutcome::Skipped); + } + if path == &PathBuf::from("/dev/sda") { + safe_to_rem.set_op_type(Op::Remove); + op.set_Op_type(Op::Remove); + } else { + safe_to_rem.set_op_type(Op::SafeToRemove); + op.set_Op_type(Op::SafeToRemove); + } + let mut disk_op = DiskOp::new(op, None, None); + disk_op.ret_val = Some(safe_to_rem); + disk_map.insert(path.to_path_buf(), Some(disk_op)); + }); + println!("Initial Disk Map: {:#?}", disk_map); + + let mut all_finished = true; + disk_map.iter().for_each(|(k, 
v)| { + //check if value finished + if let Some(val) = v { + if let Some(ret) = &val.ret_val { + if !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove + || ret.get_op_type() == Op::Remove)) + { + all_finished = false; + } + } else { + all_finished = false; + } + } else { + all_finished = false; + } + }); + assert!(!all_finished); + } } From a7e03d376171da289ac56ec6210befdd0af5d243 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 25 Feb 2020 14:56:57 -0500 Subject: [PATCH 51/76] use new function get_request_keys instead --- src/main.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/src/main.rs b/src/main.rs index d4ddacb..2a56b1b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -925,12 +925,7 @@ fn handle_operation_result( ), ); // get the path of the disk - let path = - if let Some(parent) = block_utils::get_parent_devpath_from_path(&dev_path)? { - parent - } else { - dev_path - }; + let path = get_request_keys(&dev_path)?.0; // get the current op associated with the disk if let Some(current_op) = get_map_op(message_map, &path)? { let description = match current_op.description { @@ -1089,12 +1084,8 @@ fn handle_operation_result( ), ); // get the path of the disk - let path = - if let Some(parent) = block_utils::get_parent_devpath_from_path(&dev_path)? { - parent - } else { - dev_path - }; + let path = get_request_keys(&dev_path)?.0; + // get the current op associated with the disk if let Some(current_op) = get_map_op(message_map, &path)? { let description = match current_op.description { From 55d2107c57add532504d5a2a998d78f4b96d4fb2 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 26 Feb 2020 13:06:30 -0500 Subject: [PATCH 52/76] Clippy and code squishing --- src/backend/ceph.rs | 64 +++++++++++++++++-------------------- src/client.rs | 2 -- src/disk_manager.rs | 11 +++---- src/lib/lib.rs | 19 +++++------ src/main.rs | 78 ++++++++++++++++++++------------------------- 5 files changed, 76 insertions(+), 98 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index d02b658..2bacf4e 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -389,7 +389,7 @@ impl CephBackend { let new_osd_id = osd_create(&self.cluster_handle, id, simulate)?; debug!("New osd id created: {:?}", new_osd_id); let osd_fsid = uuid::Uuid::new_v4(); - let (lv_dev_name, vg_size) = + let (lv_dev_name, _vg_size) = self.create_lvm(&osd_fsid, new_osd_id, &dev_path, journal.as_ref())?; // Mount the drive @@ -722,7 +722,7 @@ impl CephBackend { // is a block.wal journal partition). Do nothing if there is no journal fn remove_journal(&self, journal_path: &Path) -> BynarResult<()> { trace!("Journal path is {}", journal_path.display()); - if let (Some(part_id), device) = block_utils::get_device_from_path(&journal_path)? { + if let (Some(part_id), _) = block_utils::get_device_from_path(&journal_path)? { trace!("Partition number is {}", part_id); if let Some(parent_path) = block_utils::get_parent_devpath_from_path(&journal_path)? 
{ //check if parent device is in journal devices @@ -982,7 +982,7 @@ impl CephBackend { } // check if the osd is out of the cluster - fn is_osd_out(&self, osd_id: u64, simulate: bool) -> BynarResult { + fn is_osd_out(&self, osd_id: u64, _simulate: bool) -> BynarResult { let out_tree = osd_tree_status(&self.cluster_handle, ceph::cmd::CrushNodeStatus::Out)?; for node in out_tree.nodes { if node.id as u64 == osd_id { @@ -1250,7 +1250,6 @@ impl CephBackend { // weight the osd slowly to the target weight so as not to introduce too // much latency into the cluster fn gradual_weight(&self, osd_id: u64, is_add: bool, simulate: bool) -> BynarResult<()> { - let crush_tree = osd_tree(&self.cluster_handle)?; debug!("Gradually weighting osd: {}", osd_id); //set noscrub (remember to handle error by unsetting noscrub) self.set_noscrub(simulate)?; @@ -1301,22 +1300,10 @@ impl Backend for CephBackend { } //check if manual bluestore let osd_config = get_osd_config_by_path(&self.config, device)?; - let path_check; - let mut part2: String = device.to_string_lossy().to_string(); - if !osd_config.is_lvm { - if let Some(e) = block_utils::get_parent_devpath_from_path(device)? { - part2.truncate(part2.len() - 1); - part2.push_str("2"); - path_check = Path::new(&part2); - } else { - part2.push_str("2"); - path_check = Path::new(&part2); - } - } else { - path_check = device; - } + let path_check = + if !osd_config.is_lvm { get_second_partition(device)? } else { device.to_path_buf() }; // check if the disk is already out of the cluster - if !is_device_in_cluster(&self.cluster_handle, path_check)? { + if !is_device_in_cluster(&self.cluster_handle, &path_check)? { debug!("Device {} is already out of the cluster. Skipping", device.display()); return Ok(OpOutcome::SkipRepeat); } @@ -1347,7 +1334,7 @@ impl Backend for CephBackend { Ok(OpOutcome::Success) } - fn safe_to_remove(&self, device: &Path, simulate: bool) -> BynarResult<(OpOutcome, bool)> { + fn safe_to_remove(&self, device: &Path, _simulate: bool) -> BynarResult<(OpOutcome, bool)> { // check if the disk is a system disk or journal disk first and skip evaluation if so. if is_system_disk(&self.config.system_disks, device) || is_journal(&self.config.journal_devices, device) @@ -1358,21 +1345,10 @@ impl Backend for CephBackend { //check if manual bluestore let osd_config = get_osd_config_by_path(&self.config, device)?; let osd_id = if !osd_config.is_lvm { - if let Some(e) = block_utils::get_parent_devpath_from_path(device)? { - let mut part2: String = device.to_string_lossy().to_string(); - part2.truncate(part2.len() - 1); - part2.push_str("2"); - let part2 = Path::new(&part2); - debug!("CHECKING PATH {}", part2.display()); - //get the osd id - get_osd_id_from_device(&self.cluster_handle, part2)? - } else { - let mut part2: String = device.to_string_lossy().to_string(); - part2.push_str("2"); - let part2 = Path::new(&part2); - debug!("CHECKING PATH {}", part2.display()); - get_osd_id_from_device(&self.cluster_handle, part2)? - } + let part2 = &get_second_partition(device)?; + debug!("CHECKING PATH {}", part2.display()); + //get the osd id + get_osd_id_from_device(&self.cluster_handle, part2)? } else { //get the osd id get_osd_id_from_device(&self.cluster_handle, device)? 
@@ -1382,6 +1358,24 @@ impl Backend for CephBackend { } } +//get second partition if it exists, and check if the path is valid +fn get_second_partition(device: &Path) -> BynarResult { + let mut str_path = device.to_string_lossy().to_string(); + while !str_path.is_empty() && str_path.chars().last().unwrap().is_digit(10) { + str_path = str_path[0..str_path.len() - 1].to_string(); + } + str_path.push_str("2"); + let path = PathBuf::from(str_path); + if path.exists() { + Ok(path) + } else { + Err(BynarError::from(format!( + "Unable to get second partition for path {}", + device.display() + ))) + } +} + // Check if a device path is already in the cluster fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path) -> BynarResult { debug!("Check if device is in cluster"); diff --git a/src/client.rs b/src/client.rs index 34a1325..c7a0b15 100644 --- a/src/client.rs +++ b/src/client.rs @@ -49,7 +49,6 @@ fn add_disk( } ResultType::ERR => { if op_result.has_error_msg() { - let msg = op_result.get_error_msg(); return Err(BynarError::from(op_result.get_error_msg())); } else { error!("error_msg not set"); @@ -116,7 +115,6 @@ fn remove_disk( } ResultType::ERR => { if op_result.has_error_msg() { - let msg = op_result.get_error_msg(); return Err(BynarError::from(op_result.get_error_msg())); } else { error!("error_msg not set"); diff --git a/src/disk_manager.rs b/src/disk_manager.rs index f5552b2..9265ffa 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -310,7 +310,7 @@ fn listen( let mut msg = responder.recv_bytes(0)?; debug!("Got msg len: {}", msg.len()); trace!("Parsing msg {:?} as hex", msg); - if msg.len() == 0 { + if msg.is_empty() { continue; } while !msg.is_empty() { @@ -318,23 +318,20 @@ fn listen( Ok(bytes) => bytes, Err(e) => { error!("Failed to parse_from_bytes {:?}. Ignoring request", e); - break 'outer Ok(()); continue; } }; let client_id = client_id.clone(); let size = operation.write_to_bytes()?.len(); msg.drain((msg.len() - size)..msg.len()); - let send_res = send_res.clone(); - let send_disk = send_disk.clone(); - let send_ticket = send_ticket.clone(); + let (send_res, send_disk, send_ticket) = (send_res.clone(), send_disk.clone(), send_ticket.clone()); debug!("Operation requested: {:?}", operation.get_Op_type()); if op_no_disk(&responder, &operation) { continue; } // check if op is currently running. If so, skip it - if op_running!(&mut req_map, &operation) { + if op_running!(&req_map, &operation) { trace!("Operation {:?} cannot be run, disk is already running an operation", operation); //build OpOutcomeResult with SkipRepeat, send to output? let mut op_res = OpOutcomeResult::new(); @@ -343,7 +340,7 @@ fn listen( op_res.set_op_type(operation.get_Op_type()); op_res.set_result(ResultType::OK); op_res.set_value(false); - send_res.send((client_id, op_res)); + let _ = send_res.send((client_id, op_res)); // this shouldn't error unless the channel breaks continue; } op_insert(&mut req_map, &operation); diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 0f24b5c..1032906 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -295,7 +295,7 @@ pub struct DBConfig { /// get message(s) from the socket pub fn get_messages(s: &Socket) -> BynarResult> { - let id = s.recv_bytes(0)?; + let _id = s.recv_bytes(0)?; if s.get_rcvmore()? { return Ok(s.recv_bytes(0)?); } @@ -344,20 +344,17 @@ macro_rules! 
get_first_instance { return None; } while !copy.is_empty() { - match parse_from_bytes::<$mess_type>(©) { - Ok(mess) => { - let bytes = mess.write_to_bytes().unwrap(); - let size = bytes.len(); - //println!("compare {:?} with {:?}", bytes, copy); - if $message.starts_with(&bytes) { - $message.drain(0..size); - return Some(mess); - } + if let Ok(mess) = parse_from_bytes::<$mess_type>(©) { + let bytes = mess.write_to_bytes().unwrap(); + let size = bytes.len(); + //println!("compare {:?} with {:?}", bytes, copy); + if $message.starts_with(&bytes) { + $message.drain(0..size); + return Some(mess); } // we can't error out early since // the tag/wire bits are at the end and we can't tell // how long a message might be or what kind(s) are in the vec - Err(_) => {} } // parse from bytes grabs from the end of the byte array //so, remove half the length of bytes from the end of the message and try again diff --git a/src/main.rs b/src/main.rs index 2a56b1b..a63afa6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -95,7 +95,7 @@ fn create_msg_map( let db_devices: Vec = in_progress::get_devices_from_db(pool, host_mapping.storage_detail_id)? .into_iter() - .map(|(id, name, path)| path) + .map(|(_id, _name, path)| path) .collect(); let mut map: HashMap>> = HashMap::new(); @@ -140,34 +140,30 @@ fn create_msg_map( fn get_request_keys(dev_path: &PathBuf) -> BynarResult<(PathBuf, &PathBuf)> { if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { Ok((parent, dev_path)) + } else if dev_path.exists() { + Ok((dev_path.to_path_buf(), dev_path)) } else { - if dev_path.exists() { - Ok((dev_path.to_path_buf(), dev_path)) + // partition was destroyed...probably + // make parent path + let mut str_path = dev_path.to_string_lossy().to_string(); + while str_path.chars().last().unwrap().is_digit(10) { + str_path = str_path[0..str_path.len() - 1].to_string(); + } + let path = PathBuf::from(str_path.to_string()); + if path.exists() { + Ok((path, dev_path)) // partition probably + } else if str_path.starts_with("/dev/sd") + || str_path.starts_with("/dev/hd") + || str_path.starts_with("/dev/nvme") + { + Ok((dev_path.to_path_buf(), dev_path)) // disk...probably } else { - // partition was destroyed...probably - // make parent path - let mut str_path = dev_path.to_string_lossy().to_string(); - while str_path.chars().last().unwrap().is_digit(10) { - str_path = str_path[0..str_path.len() - 1].to_string(); - } - let path = PathBuf::from(str_path.to_string()); - if path.exists() { - Ok((path, dev_path)) // partition probably - } else { - if str_path.starts_with("/dev/sd") - || str_path.starts_with("/dev/hd") - || str_path.starts_with("/dev/nvme") - { - Ok((dev_path.to_path_buf(), dev_path)) // disk...probably - } else { - // path just doesn't exist, so error... - error!("Path {} does not exist, nor does its parent.", dev_path.display()); - return Err(BynarError::from(format!( - "Path {} does not exist, nor does its parent.", - dev_path.display() - ))); - } - } + // path just doesn't exist, so error... 
+ error!("Path {} does not exist, nor does its parent.", dev_path.display()); + Err(BynarError::from(format!( + "Path {} does not exist, nor does its parent.", + dev_path.display() + ))) } } } @@ -321,15 +317,13 @@ fn add_disk_to_description( } fn check_for_failed_disks( - config: &ConfigSettings, message_map: &mut HashMap>>, message_queue: &mut VecDeque<(Operation, Option, Option)>, host_info: &Host, pool: &Pool, host_mapping: &HostDetailsMapping, - simulate: bool, + _simulate: bool, ) -> BynarResult<()> { - let public_key = get_public_key(config, &host_info)?; //Host information to use in ticket creation let mut description = format!("A disk on {} failed. Please replace.", host_info.hostname); description.push_str(&format!( @@ -895,7 +889,7 @@ fn handle_operation_result( // check if all ops in the disk have finished let disk = get_disk_map_op(message_map, &dev_path)?; let mut all_finished = true; - disk.iter().for_each(|(k, v)| { + disk.iter().for_each(|(_, v)| { //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { @@ -979,11 +973,11 @@ fn handle_operation_result( ))); } //otherwise error.... - return Err(BynarError::from(format!( + Err(BynarError::from(format!( "{} on host {} does not have a currently running operation!", dev_path.display(), host_info.hostname - ))); + ))) } Op::Remove => { //check if successful or not and send to slack @@ -1065,7 +1059,7 @@ fn handle_operation_result( // check if all ops in the disk have finished let disk = get_disk_map_op(message_map, &dev_path)?; let mut all_finished = true; - disk.iter().for_each(|(k, v)| { + disk.iter().for_each(|(_, v)| { //check if value finished if let Some(val) = v { if val.ret_val.is_none() { @@ -1141,10 +1135,10 @@ fn handle_operation_result( } _ => { // these operations should never get called by Bynar - return Err(BynarError::from(format!( + Err(BynarError::from(format!( "{} could not have run this operation!", op_res.get_disk() - ))); + ))) } } } @@ -1223,9 +1217,9 @@ fn send_and_recieve( None => { //Actually, this is a problem since Bynar only sends Add/SafeToRemove/Remove requests error!("Message is not an OpOutcomeResult"); - return Err(BynarError::from(format!( - "Message received is not an OpOutcomeResult" - ))); + return Err(BynarError::from( + "Message received is not an OpOutcomeResult".to_string(), + )); } } } @@ -1306,7 +1300,7 @@ fn main() { ); return; } - let config: ConfigSettings = config.expect("Failed to load config"); + let mut config: ConfigSettings = config.expect("Failed to load config"); let _ = CombinedLogger::init(loggers); let pidfile = format!("/var/log/{}", config.daemon_pid); //check if the pidfile exists @@ -1418,7 +1412,6 @@ fn main() { 'outer: loop { let now = Instant::now(); match check_for_failed_disks( - &config, &mut message_map, &mut message_queue, &host_info, @@ -1496,8 +1489,7 @@ fn main() { .expect("Unable to connect to slack"); return; } - let config: ConfigSettings = - config_file.expect("Failed to load config"); + config = config_file.expect("Failed to load config"); } signal_hook::SIGINT | signal_hook::SIGCHLD => { //skip this From 2facc6fcc0c091d1bd1ec404ba50670c13ff37c4 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 26 Feb 2020 14:16:17 -0500 Subject: [PATCH 53/76] pull out repeat behavior into smaller functions for reduced complexity --- src/main.rs | 530 ++++++++++++++-------------------------------------- 1 file changed, 143 insertions(+), 387 deletions(-) diff --git a/src/main.rs b/src/main.rs index a63afa6..36db82a 100644 --- a/src/main.rs 
+++ b/src/main.rs @@ -79,14 +79,7 @@ fn create_msg_map( .into_iter() .filter(|b| { !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("sr") - } else { - true - }) - }) - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("loop") + (p.to_string_lossy().starts_with("sr") || p.to_string_lossy().starts_with("loop")) } else { true }) @@ -193,9 +186,8 @@ fn add_or_update_map_op( //add to map let mut disk_map: HashMap> = HashMap::new(); disk_map.insert(parent.to_path_buf(), None); - let partitions = block_utils::get_block_partitions()?; // check if partition parent is device - partitions + block_utils::get_block_partitions()? .iter() .filter(|partition| { partition.to_string_lossy().contains(&parent.to_string_lossy().to_string()) @@ -232,7 +224,6 @@ fn remove_map_op( dev_path: &PathBuf, ) -> BynarResult> { let (parent, dev_path) = get_request_keys(dev_path)?; - //parent is in the map if let Some(disk) = message_map.get_mut(&parent) { if let Some(partition) = disk.clone().get(dev_path) { @@ -453,149 +444,6 @@ fn check_for_failed_disks( message_queue.push_back(mess2); } }); - /*for result in test_disk::check_all_disks(&host_info, pool, host_mapping)? { - match result { - Ok(state_machine) => { - info!( - "Disk status: /dev/{} {:?}", - state_machine.block_device.device.name, state_machine - ); - // just use state_machine.block_device.dev_path??? - let mut dev_path = PathBuf::from("/dev"); - let dev_name = &state_machine.block_device.device.name; - dev_path.push(&dev_name); - - if state_machine.block_device.state == State::WaitingForReplacement { - add_disk_to_description(&mut description, &dev_path, &state_machine); - trace!("Description: {}", description); - info!("Connecting to database to check if disk is in progress"); - let in_progress = in_progress::is_hardware_waiting_repair( - pool, - host_mapping.storage_detail_id, - &dev_name, - None, - )?; - match (simulate, in_progress) { - (false, true) => { - debug!("Device is already in the repair queue"); - } - (false, false) => { - debug!("Sending Safe-to-Remove and Remove requests"); - let op_id = match state_machine.block_device.operation_id { - None => { - error!( - "Operation not recorded for {}", - state_machine.block_device.dev_path.display() - ); - 0 - } - Some(i) => i, - }; - /*debug!("Asking disk-manager if it's safe to remove disk"); - // CALL RPC - let socket = helpers::connect( - &config.manager_host, - &config.manager_port.to_string(), - &public_key, - )?; - match ( - helpers::safe_to_remove_request(&socket, &dev_path), - config.slack_webhook.is_some(), - ) { - (Ok((OpOutcome::Success, true)), true) => { - debug!("safe to remove: true"); - //Ok to remove the disk - let _ = notify_slack( - config, - &format!( - "Removing disk: {} on host: {}", - dev_path.display(), - host_info.hostname - ), - ); - - match helpers::remove_disk_request( - &socket, &dev_path, None, false, - ) { - Ok(outcome) => match outcome { - OpOutcome::Success => debug!("Disk removal successful"), - OpOutcome::Skipped => { - debug!("Disk skipped, disk is not removable") - } - OpOutcome::SkipRepeat => { - debug!("Disk already removed, skipping.") - } - }, - Err(e) => { - error!("Disk removal failed: {}", e); - } - }; - } - (Ok((_, false)), true) => { - debug!("safe to remove: false"); - let _ = notify_slack( - config, - &format!( - "Need to remove disk {} but it's not safe \ - on host: {}. I need a human. 
Filing a ticket", - dev_path.display(), - host_info.hostname, - ), - ); - } - (Err(err), true) => { - //Not ok to remove the disk but we need to - let _ = notify_slack( - &config, - &format!( - "Need to remove disk {} but can't tell if it's \ - safe on host: {}. Error: {:?}. Filing a ticket", - dev_path.display(), - host_info.hostname, - err - ), - ); - } - (..) => {} - }; - debug!("Creating support ticket"); - let ticket_id = - create_support_ticket(config, "Bynar: Dead disk", &description)?; - debug!("Recording ticket id {} in database", ticket_id); - let op_id = match state_machine.block_device.operation_id { - None => { - error!( - "Operation not recorded for {}", - state_machine.block_device.dev_path.display() - ); - 0 - } - Some(i) => i, - }; - // update operation detials in DB - let mut operation_detail = - OperationDetail::new(op_id, OperationType::WaitingForReplacement); - operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?;*/ - } - (..) => {} - } - // Handle the ones that ended up stuck in Fail - } else if state_machine.block_device.state == State::Fail { - error!("Disk {} ended in a Fail state", dev_path.display(),); - } else { - // The rest should be State::Good ? - } - } - Err(e) => { - error!("check_all_disks failed with error: {:?}", e); - return Err(BynarError::new(format!( - "check_all_disks failed with error: {:?}", - e - ))); - } - }; - }*/ failed.iter().for_each(|state_machine| { error!("Disk {} ended in a Fail state", state_machine.block_device.dev_path.display()) }); @@ -722,46 +570,6 @@ fn add_repaired_disks( ); let tid = Some(ticket.ticket_id.to_string()); message_queue.push_back((op, tid, None)); - //CALL RPC - // add add_disk request to message_queue - /* - debug!("Connecting to disk-manager"); - let socket = helpers::connect( - &config.manager_host, - &config.manager_port.to_string(), - &public_key, - )?; - - match helpers::add_disk_request( - &socket, - &Path::new(&ticket.device_path), - None, - simulate, - ) { - Ok(outcome) => { - match outcome { - OpOutcome::Success => { - debug!("Disk added successfully. Updating database record") - } - // Disk was either boot or something that shouldn't be added via backend - OpOutcome::Skipped => debug!("Disk Skipped. Updating database record"), - // Disk is already in the cluster - OpOutcome::SkipRepeat => { - debug!("Disk already added. Skipping. Updating database record") - } - } - match in_progress::resolve_ticket_in_db(pool, &ticket.ticket_id) { - Ok(_) => debug!("Database updated"), - Err(e) => { - error!("Failed to resolve ticket {}. {:?}", ticket.ticket_id, e) - } - }; - } - Err(e) => { - error!("Failed to add disk: {:?}", e); - } - }; - */ } Ok(false) => {} Err(e) => { @@ -818,6 +626,119 @@ fn handle_add_disk_res( }; } +//update map with operation result +fn update_map_result( + message_map: &mut HashMap>>, + host_info: &Host, + dev_path: &PathBuf, + op_res: OpOutcomeResult, +) -> BynarResult<()> { + if let Some(mut current_op) = get_map_op(message_map, &dev_path)? 
{ + current_op.ret_val = Some(op_res); + //push op back into map + add_or_update_map_op(message_map, &dev_path, Some(current_op))?; + Ok(()) + } else { + Err(BynarError::from(format!( + "{} on host {} does not have a currently running operation!", + dev_path.display(), + host_info.hostname + ))) + } +} + +// check if all operations on a disk have finished (assuming SafeToRemove/Remove operations) +fn is_all_finished( + message_map: &mut HashMap>>, + dev_path: &PathBuf, +) -> BynarResult { + // check if all the other paths in disk are SafeToRemove (and not Success) + // check if all ops in the disk have finished + let disk = get_disk_map_op(message_map, &dev_path)?; + let mut all_finished = true; + disk.iter().for_each(|(_, v)| { + //check if value finished + if let Some(val) = v { + if let Some(ret) = &val.ret_val { + if !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) + { + all_finished = false; + } + } else { + all_finished = false; + } + } else { + all_finished = false; + } + }); + Ok(all_finished) +} + +// Open a ticket +fn open_jira_ticket( + message_map: &mut HashMap>>, + host_info: &Host, + pool: &Pool, + config: &ConfigSettings, + dev_path: &PathBuf, +) -> BynarResult<()> { + // get the path of the disk + let path = get_request_keys(&dev_path)?.0; + // get the current op associated with the disk + if let Some(current_op) = get_map_op(message_map, &path)? { + let description = match current_op.description { + Some(d) => d, + None => { + return Err(BynarError::from(format!( + "Disk {} on host {} is missing a description", + path.display(), + host_info.hostname + ))) + } + }; + let op_id = match current_op.operation_id { + None => { + error!("Operation not recorded for {}", path.display()); + 0 + } + Some(i) => i, + }; + //open JIRA ticket+ notify slack + debug!("Creating support ticket"); + // temporarily disable error out + match create_support_ticket(config, "Bynar: Dead disk", &description) { + Ok(ticket_id) => { + debug!("Recording ticket id {} in database", ticket_id); + // update operation details in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + } + Err(e) => { + let _ = notify_slack(config, &format!("Unable to create ticket {:?}", e)); + } + } + /* + let ticket_id = + create_support_ticket(config, "Bynar: Dead disk", &description)?; + debug!("Recording ticket id {} in database", ticket_id); + // update operation detials in DB + let mut operation_detail = + OperationDetail::new(op_id, OperationType::WaitingForReplacement); + operation_detail.set_tracking_id(ticket_id); + add_or_update_operation_detail(pool, &mut operation_detail)?; + */ + return Ok(()); + } + return Err(BynarError::from(format!( + "Disk {} on host {} is missing the current operation", + path.display(), + host_info.hostname + ))); +} + //handle return of Operation fn handle_operation_result( message_map: &mut HashMap>>, @@ -826,88 +747,49 @@ fn handle_operation_result( op_res: OpOutcomeResult, config: &ConfigSettings, ) -> BynarResult<()> { - match op_res.get_result() { - ResultType::OK => {} - ResultType::ERR => { - if op_res.has_error_msg() { - let msg = op_res.get_error_msg(); - match op_res.get_op_type() { - Op::Add => { - error!("Add disk failed : {}", msg); - return Err(BynarError::from(msg)); - } - Op::Remove => { - error!("Remove disk failed : {}", msg); - // return 
Err(BynarError::from(msg)); - // no need to error out, but update the map. Error outcomes are also expected for Remove, - // since remove might be run on the disk and the partition...or the input path is not in the - // config file - } - Op::SafeToRemove => { - error!("SafeToRemove disk failed : {}", msg); - // no need to error out, but update the map. Error outcomes are expected for SafeToRemove. - // Ex. you removed a disk first before the partition. - } - _ => {} - } + if let (ResultType::ERR, true) = (op_res.get_result(), op_res.has_error_msg()) { + let msg = op_res.get_error_msg(); + match op_res.get_op_type() { + Op::Add => { + error!("Add disk failed : {}", msg); + return Err(BynarError::from(msg)); } + Op::Remove => { + error!("Remove disk failed : {}", msg); + // no need to error out, but update the map. Error outcomes are also expected for Remove, + // since remove might be run on the disk and the partition...or the input path is not in the config file + } + Op::SafeToRemove => { + error!("SafeToRemove disk failed : {}", msg); + // no need to error out, but update the map. Error outcomes are expected for SafeToRemove. + // Ex. you removed a disk first before the partition. + } + _ => {} } } + let dev_path = PathBuf::from(op_res.get_disk()); match op_res.get_op_type() { Op::Add => { - let path = Path::new(op_res.get_disk()); - if let Some(disk_op) = get_map_op(message_map, &path.to_path_buf())? { + if let Some(disk_op) = get_map_op(message_map, &dev_path.to_path_buf())? { if let Some(ticket_id) = disk_op.description { handle_add_disk_res(pool, &op_res, ticket_id); //update result in the map (in otherwords, just set it to None) - remove_map_op(message_map, &path.to_path_buf())?; + remove_map_op(message_map, &dev_path.to_path_buf())?; return Ok(()); } } - error!("Unable to get current operation in the map for {}", path.display()); + error!("Unable to get current operation in the map for {}", dev_path.display()); Err(BynarError::from(format!( "Unable to get current operation in the map for {}", - path.display() + dev_path.display() ))) } Op::SafeToRemove => { // get the op from map, update it with outcome, handle errors as necessary (just store in map) - let dev_path = PathBuf::from(op_res.get_disk()); - if let Some(mut current_op) = get_map_op(message_map, &dev_path)? { - current_op.ret_val = Some(op_res); - //push op back into map - add_or_update_map_op(message_map, &dev_path, Some(current_op))?; - } else { - return Err(BynarError::from(format!( - "{} on host {} does not have a currently running operation!", - dev_path.display(), - host_info.hostname - ))); - } - // check if all the other paths in disk are SafeToRemove (and not Success) - // check if all ops in the disk have finished - let disk = get_disk_map_op(message_map, &dev_path)?; - let mut all_finished = true; - disk.iter().for_each(|(_, v)| { - //check if value finished - if let Some(val) = v { - if let Some(ret) = &val.ret_val { - if !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove - || ret.get_op_type() == Op::Remove)) - { - all_finished = false; - } - } else { - all_finished = false; - } - } else { - all_finished = false; - } - }); + update_map_result(message_map, host_info, &dev_path, op_res)?; // if so, notify slack - if all_finished { + if is_all_finished(message_map, &dev_path)? 
{ debug!("safe to remove: false"); let _ = notify_slack( config, @@ -918,59 +800,7 @@ fn handle_operation_result( host_info.hostname, ), ); - // get the path of the disk - let path = get_request_keys(&dev_path)?.0; - // get the current op associated with the disk - if let Some(current_op) = get_map_op(message_map, &path)? { - let description = match current_op.description { - Some(d) => d, - None => { - return Err(BynarError::from(format!( - "Disk {} on host {} is missing a description", - path.display(), - host_info.hostname - ))) - } - }; - let op_id = match current_op.operation_id { - None => { - error!("Operation not recorded for {}", path.display()); - 0 - } - Some(i) => i, - }; - //open JIRA ticket+ notify slack - debug!("Creating support ticket"); - // temporarily disable error out - match create_support_ticket(config, "Bynar: Dead disk", &description) { - Ok(ticket_id) => { - debug!("Recording ticket id {} in database", ticket_id); - // update operation detials in DB - let mut operation_detail = - OperationDetail::new(op_id, OperationType::WaitingForReplacement); - operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; - } - Err(e) => { - let _ = - notify_slack(config, &format!("Unable to create ticket {:?}", e)); - } - } - /*let ticket_id = - create_support_ticket(config, "Bynar: Dead disk", &description)?; - debug!("Recording ticket id {} in database", ticket_id); - // update operation detials in DB - let mut operation_detail = - OperationDetail::new(op_id, OperationType::WaitingForReplacement); - operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?;*/ - return Ok(()); - } - return Err(BynarError::from(format!( - "Disk {} on host {} is missing the current operation", - path.display(), - host_info.hostname - ))); + open_jira_ticket(message_map, host_info, pool, config, &dev_path)?; } //otherwise error.... Err(BynarError::from(format!( @@ -981,7 +811,6 @@ fn handle_operation_result( } Op::Remove => { //check if successful or not and send to slack - let dev_path = PathBuf::from(op_res.get_disk()); match op_res.get_outcome() { OpOutcome::Success => { debug!( @@ -1045,30 +874,9 @@ fn handle_operation_result( } } //update map - if let Some(mut current_op) = get_map_op(message_map, &dev_path)? { - current_op.ret_val = Some(op_res); - //push op back into map - add_or_update_map_op(message_map, &dev_path, Some(current_op))?; - } else { - return Err(BynarError::from(format!( - "{} on host {} does not have a currently running operation!", - dev_path.display(), - host_info.hostname - ))); - } - // check if all ops in the disk have finished - let disk = get_disk_map_op(message_map, &dev_path)?; - let mut all_finished = true; - disk.iter().for_each(|(_, v)| { - //check if value finished - if let Some(val) = v { - if val.ret_val.is_none() { - all_finished = false; - } - } - }); + update_map_result(message_map, host_info, &dev_path, op_res)?; //if all finished open ticket+ notify slack - if all_finished { + if is_all_finished(message_map, &dev_path)? { let _ = notify_slack( &config, &format!( @@ -1077,59 +885,7 @@ fn handle_operation_result( dev_path.display(), ), ); - // get the path of the disk - let path = get_request_keys(&dev_path)?.0; - - // get the current op associated with the disk - if let Some(current_op) = get_map_op(message_map, &path)? 
{ - let description = match current_op.description { - Some(d) => d, - None => { - return Err(BynarError::from(format!( - "Disk {} is missing a description", - path.display() - ))) - } - }; - let op_id = match current_op.operation_id { - None => { - error!("Operation not recorded for {}", path.display()); - 0 - } - Some(i) => i, - }; - //open JIRA ticket+ notify slack - debug!("Creating support ticket"); - match create_support_ticket(config, "Bynar: Dead disk", &description) { - Ok(ticket_id) => { - debug!("Recording ticket id {} in database", ticket_id); - // update operation detials in DB - let mut operation_detail = - OperationDetail::new(op_id, OperationType::WaitingForReplacement); - operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?; - } - Err(e) => { - let _ = - notify_slack(config, &format!("Unable to create ticket {:?}", e)); - } - } - // temporarily disable ticket erroring out - /* - let ticket_id = - create_support_ticket(config, "Bynar: Dead disk", &description)?; - debug!("Recording ticket id {} in database", ticket_id); - // update operation detials in DB - let mut operation_detail = - OperationDetail::new(op_id, OperationType::WaitingForReplacement); - operation_detail.set_tracking_id(ticket_id); - add_or_update_operation_detail(pool, &mut operation_detail)?;*/ - return Ok(()); - } - return Err(BynarError::from(format!( - "Disk {} is missing the current operation", - path.display() - ))); + open_jira_ticket(message_map, host_info, pool, config, &dev_path)?; } Ok(()) } From 53df069a5b7cc8908b3e381d63ed6d3916cf45b2 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 26 Feb 2020 14:47:31 -0500 Subject: [PATCH 54/76] Removed repeat code and replaced with function call --- src/lib/lib.rs | 140 ++++--------------------------------------------- src/main.rs | 31 ++++------- 2 files changed, 19 insertions(+), 152 deletions(-) diff --git a/src/lib/lib.rs b/src/lib/lib.rs index 1032906..0da7d99 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -70,40 +70,17 @@ pub fn add_disk_request( client_id: Vec, simulate: bool, ) -> BynarResult<()> { - // { - let mut o = Operation::new(); debug!("Creating add disk operation request"); - //send the id first - s.send(&client_id, zmq::SNDMORE)?; - + let mut o = Operation::new(); o.set_Op_type(Op::Add); o.set_disk(format!("{}", path.display())); o.set_simulate(simulate); if let Some(id) = id { o.set_osd_id(id); } - - let encoded = o.write_to_bytes()?; - debug!("Sending message"); - s.send(&encoded, 0)?; + debug!("Sending message in add_disk_request"); + request(s, o, client_id)?; Ok(()) - /*debug!("Waiting for response"); - let add_response = s.recv_bytes(0)?; - debug!("Decoding msg len: {}", add_response.len()); - let op_result = parse_from_bytes::(&add_response)?; - match op_result.get_result() { - ResultType::OK => Ok(op_result.get_outcome()), - ResultType::ERR => { - if op_result.has_error_msg() { - let msg = op_result.get_error_msg(); - error!("Add disk failed: {}", msg); - Err(BynarError::from(op_result.get_error_msg())) - } else { - error!("Add disk failed but error_msg not set"); - Err(BynarError::from("Add disk failed but error_msg not set")) - } - } - }*/ } /* @@ -133,56 +110,21 @@ pub fn list_disks_request(s: &Socket, client_id: Vec) -> BynarResult<()> { debug!("Printing ID {:?}", client_id); let mut o = Operation::new(); debug!("Creating list operation request"); - //send the id first o.set_Op_type(Op::List); - - debug!("Encoding as hex"); - let encoded = o.write_to_bytes()?; - 
debug!("Encoded value {:?}", encoded); - debug!("Sending message"); - - s.send(client_id, zmq::SNDMORE)?; - s.send(encoded, 0)?; - //(&[client_id, encoded], 0)?; - - //s.send(&client_id, zmq::SNDMORE)?; - //s.send("Send another message", zmq::SNDMORE)?; - //s.send(encoded, 0)?; + debug!("Sending message in list_disks_request"); + request(s, o, client_id)?; Ok(()) - /*debug!("Waiting for response"); - let disks_response = s.recv_bytes(0)?; - debug!("Decoding msg len: {}", disks_response.len()); - let disk_list = parse_from_bytes::(&disks_response)?; - - let mut d: Vec = Vec::new(); - for disk in disk_list.get_disk() { - d.push(disk.clone()); - } - - Ok(d)*/ } /// send safe-to-remove request to disk-manager pub fn safe_to_remove_request(s: &Socket, path: &Path, client_id: Vec) -> BynarResult<()> { - //<(OpOutcome, bool)> { let mut o = Operation::new(); - //send the id first - s.send(&client_id, zmq::SNDMORE)?; debug!("Creating safe to remove operation request"); o.set_Op_type(Op::SafeToRemove); o.set_disk(format!("{}", path.display())); - let encoded = o.write_to_bytes()?; - debug!("Sending message"); - s.send(&encoded, 0)?; + debug!("Sending message in safe_to_remove_request"); + request(s, o, client_id)?; Ok(()) - /*debug!("Waiting for response"); - let safe_response = s.recv_bytes(0)?; - debug!("Decoding msg len: {}", safe_response.len()); - let op_result = parse_from_bytes::(&safe_response)?; - match op_result.get_result() { - ResultType::OK => Ok((op_result.get_outcome(), op_result.get_value())), - ResultType::ERR => Err(BynarError::from(op_result.get_error_msg())), - }*/ } /// Send a remove disk request to the disk_manager @@ -193,45 +135,17 @@ pub fn remove_disk_request( client_id: Vec, simulate: bool, ) -> BynarResult<()> { - //BynarResult { let mut o = Operation::new(); debug!("Creating remove operation request"); - //send the id first - s.send(&client_id, zmq::SNDMORE)?; o.set_Op_type(Op::Remove); o.set_disk(format!("{}", path.display())); o.set_simulate(simulate); if let Some(osd_id) = id { o.set_osd_id(osd_id); } - - let encoded = o.write_to_bytes()?; - debug!("Sending message"); - s.send(encoded, 0)?; + debug!("Sending message in remove_disk_request"); + request(s, o, client_id)?; Ok(()) - /*debug!("Waiting for response"); - let remove_response = s.recv_bytes(0)?; - debug!("Decoding msg len: {}", remove_response.len()); - let op_result = match parse_from_bytes::(&remove_response) { - Err(e) => { - error!("Unable to Parse Message {:?}", e); - return Err(BynarError::from(e)); - } - Ok(o) => o, - }; - match op_result.get_result() { - ResultType::OK => Ok(op_result.get_outcome()), - ResultType::ERR => { - if op_result.has_error_msg() { - let msg = op_result.get_error_msg(); - error!("Remove disk failed: {}", msg); - Err(BynarError::from(op_result.get_error_msg())) - } else { - error!("Remove disk failed but error_msg not set"); - Err(BynarError::from("Remove disk failed but error_msg not set")) - } - } - }*/ } // default filename for daemon_output @@ -372,43 +286,9 @@ pub fn get_first_op_result(message: &mut Vec) -> Option { pub fn get_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { debug!("Printing ID {:?}", client_id); let mut o = Operation::new(); - //send the id first - s.send(&client_id, zmq::SNDMORE)?; debug!("calling get_jira_tickets "); o.set_Op_type(Op::GetCreatedTickets); - let encoded = o.write_to_bytes()?; - debug!("encoded {:?}", encoded); debug!("Sending message in get_jira_tickets"); - s.send(&encoded, 0)?; + request(s, o, client_id)?; Ok(()) - - 
/*debug!("Waiting for response: get_jira_tickets"); - let tickets_response = s.recv_bytes(0)?; - debug!("Decoding msg len: {}", tickets_response.len()); - - let op_jira_result = parse_from_bytes::(&tickets_response)?; - match op_jira_result.get_result() { - ResultType::OK => { - debug!("got tickets successfully"); - let proto_jira = op_jira_result.get_tickets(); - let mut _jira: Vec = Vec::new(); - for JiraInfo in proto_jira { - debug!("get_ticket_id: {}", JiraInfo.get_ticket_id()); - debug!("get_server_name: {}", JiraInfo.get_server_name()); - } - Ok(()) - } - ResultType::ERR => { - if op_jira_result.has_error_msg() { - let msg = op_jira_result.get_error_msg(); - error!("get jira tickets failed : {}", msg); - Err(BynarError::from(op_jira_result.get_error_msg())) - } else { - error!("Get jira tickets failed but error_msg not set"); - Err(BynarError::from( - "Get jira tickets failed but error_msg not set", - )) - } - } - }*/ } diff --git a/src/main.rs b/src/main.rs index 36db82a..e3831ec 100644 --- a/src/main.rs +++ b/src/main.rs @@ -732,11 +732,11 @@ fn open_jira_ticket( */ return Ok(()); } - return Err(BynarError::from(format!( + Err(BynarError::from(format!( "Disk {} on host {} is missing the current operation", path.display(), host_info.hostname - ))); + ))) } //handle return of Operation @@ -876,7 +876,7 @@ fn handle_operation_result( //update map update_map_result(message_map, host_info, &dev_path, op_res)?; //if all finished open ticket+ notify slack - if is_all_finished(message_map, &dev_path)? { + if is_all_finished(message_map, &dev_path)? { let _ = notify_slack( &config, &format!( @@ -917,9 +917,9 @@ fn send_and_recieve( if let Some((mess, desc, op_id)) = message_queue.pop_front() { // if mess.op_type() == Op::Remove, check if Safe-To-Remove in map complete // if not, send to end of queue (push_back) - let path = Path::new(mess.get_disk()).to_path_buf(); + let path = PathBuf::from(mess.get_disk()); //check if there was a previous request, and whether it was completed - if let Some(disk_op) = get_map_op(&message_map, &path.to_path_buf())? { + if let Some(disk_op) = get_map_op(&message_map, &path)? 
{ // check if Safe-to-remove returned yet if let Some(val) = disk_op.ret_val { // check if mess is a Remove op @@ -1291,7 +1291,6 @@ fn main() { mod tests { use super::*; use block_utils::*; - // list of devices to use in some test functions fn get_devices() -> Vec { [ @@ -1326,14 +1325,8 @@ mod tests { .into_iter() .filter(|b| { !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("sr") - } else { - true - }) - }) - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("loop") + (p.to_string_lossy().starts_with("sr") + || p.to_string_lossy().starts_with("loop")) } else { true }) @@ -1363,14 +1356,8 @@ mod tests { .into_iter() .filter(|b| { !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("sr") - } else { - true - }) - }) - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("loop") + (p.to_string_lossy().starts_with("sr") + || p.to_string_lossy().starts_with("loop")) } else { true }) From c80adb2eb6f36f816a0854b2c783e8a9ea5518af Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 28 Feb 2020 09:12:17 -0500 Subject: [PATCH 55/76] pulled out repeat behavior into a macro --- src/disk_manager.rs | 85 +++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 41 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 9265ffa..8241be5 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -47,14 +47,7 @@ fn create_req_map() -> BynarResult>> { .into_iter() .filter(|b| { !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("sr") - } else { - true - }) - }) - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - p.to_string_lossy().starts_with("loop") + (p.to_string_lossy().starts_with("sr") || p.to_string_lossy().starts_with("loop")) } else { true }) @@ -81,14 +74,6 @@ macro_rules! get_op_pathbuf { // check if a disk already has a request. Return true if an op is already running (false otherwise or if // op is List or GetCreatedTickets) -fn is_op_running(req_map: &mut HashMap>, op: &Operation) -> bool { - // if op_type is List or GetCreatedTickets, return false - match op.get_Op_type() { - Op::List | Op::GetCreatedTickets => false, - _ => req_map.get(&get_op_pathbuf!(op)).is_some(), - } -} - macro_rules! op_running { ($req_map:expr,$op:expr) => {{ match $op.get_Op_type() { @@ -571,6 +556,41 @@ fn respond_to_client(result: &T, s: &Socket) -> BynarResul Ok(()) } +macro_rules! 
set_outcome_result { + (err => $result:ident, $outcome:expr) => {{ + $result.set_result(ResultType::ERR); + $result.set_error_msg($outcome); + }}; + (out =>$result:ident, $outcome:expr) => {{ + $result.set_result(ResultType::OK); + $result.set_outcome($outcome); + }}; + (out => $result:ident, $outcome:expr, $val:expr) => {{ + $result.set_value($val); + set_outcome_result!(out => $result, $outcome) + }}; +} + +#[test] +fn test_set_outcome_result(){ + let mut result = OpOutcomeResult::new(); + result.set_disk("/dev/sdc".to_string()); + result.set_op_type(Op::Add); + set_outcome_result!(err => result, "Error was set".to_string()); + println!("Error Outcome: {:#?}", result); + let mut result = OpOutcomeResult::new(); + result.set_disk("/dev/sdc".to_string()); + result.set_op_type(Op::Add); + let outcome = OpOutcome::Success; + set_outcome_result!(out => result, outcome); + println!("Success Outcome: {:#?}", result); + let mut result = OpOutcomeResult::new(); + result.set_disk("/dev/sdc".to_string()); + result.set_op_type(Op::Add); + set_outcome_result!(out => result, outcome, true); + println!("Success Outcome: {:#?}", result); +} + // add disk request function. Send the result through the sender channel back to the main thread. fn add_disk( sender: &crossbeam_channel::Sender<(Vec, OpOutcomeResult)>, @@ -586,9 +606,7 @@ fn add_disk( let backend = match backend::load_backend(backend, Some(config_dir)) { Ok(backend) => backend, Err(e) => { - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - + set_outcome_result!(err => result, e.to_string()); // Bail early. We can't load the backend let _ = sender.send((client_id, result)); return Ok(()); @@ -598,12 +616,10 @@ fn add_disk( //Send back OpOutcomeResult match backend.add_disk(&Path::new(d), id, false) { Ok(outcome) => { - result.set_outcome(outcome); - result.set_result(ResultType::OK); + set_outcome_result!(out => result, outcome); } Err(e) => { - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); + set_outcome_result!(err => result, e.to_string()); } }; let _ = sender.send((client_id, result)); @@ -672,11 +688,6 @@ fn list_disks( let mut disks = Disks::new(); disks.set_disk(RepeatedField::from_vec(disk_list)); - /*debug!("Encoding disk list"); - let encoded = disks.write_to_bytes()?; - - debug!("Responding to client with msg len: {}", encoded.len()); - s.send(&encoded, 0)?;*/ let _ = c.send((client_id, disks)); Ok(()) } @@ -695,9 +706,7 @@ fn remove_disk( let backend = match backend::load_backend(backend, Some(config_dir)) { Ok(b) => b, Err(e) => { - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - + set_outcome_result!(err => result, e.to_string()); // Bail early. 
We can't load the backend let _ = sender.send((client_id, result)); return Ok(()); @@ -705,12 +714,10 @@ fn remove_disk( }; match backend.remove_disk(&Path::new(d), false) { Ok(outcome) => { - result.set_outcome(outcome); - result.set_result(ResultType::OK); + set_outcome_result!(out => result, outcome); } Err(e) => { - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); + set_outcome_result!(err => result, e.to_string()); } }; let _ = sender.send((client_id, result)); @@ -724,7 +731,6 @@ fn safe_to_remove( ) -> BynarResult<(OpOutcome, bool)> { let backend = backend::load_backend(backend, Some(config_dir))?; let safe = backend.safe_to_remove(d, false)?; - Ok(safe) } @@ -742,14 +748,11 @@ fn safe_to_remove_disk( match safe_to_remove(&Path::new(d), &backend, &config_dir) { Ok((outcome, val)) => { debug!("Safe to remove: {}", val); - result.set_result(ResultType::OK); - result.set_value(val); - result.set_outcome(outcome); + set_outcome_result!(out => result, outcome, val); } Err(e) => { debug!("Safe to remove err: {}", e); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); + set_outcome_result!(err => result, e.to_string()); let _ = sender.send((client_id, result)); return Err(BynarError::new(format!("safe to remove error: {}", e))); } From cc798ffac40b0edfd1cbbf12a3eb44c59c9a3594 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 28 Feb 2020 10:11:37 -0500 Subject: [PATCH 56/76] clean up some redundant code --- src/disk_manager.rs | 114 ++++++++++++++++---------------------------- 1 file changed, 42 insertions(+), 72 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 8241be5..8304391 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -19,6 +19,8 @@ use api::service::{ mod backend; mod in_progress; mod test_disk; +#[macro_use] +mod util; use crate::backend::BackendType; use block_utils::{Device, MediaType}; @@ -47,7 +49,7 @@ fn create_req_map() -> BynarResult>> { .into_iter() .filter(|b| { !(if let Some(p) = b.as_path().file_name() { - (p.to_string_lossy().starts_with("sr") || p.to_string_lossy().starts_with("loop")) + (p.to_string_lossy().starts_with("sr") || p.to_string_lossy().starts_with("loop")) } else { true }) @@ -101,6 +103,21 @@ macro_rules! op_running { }}; } +macro_rules! set_outcome_result { + (err => $result:ident, $outcome:expr) => {{ + $result.set_result(ResultType::ERR); + $result.set_error_msg($outcome); + }}; + (ok =>$result:ident, $outcome:expr) => {{ + $result.set_result(ResultType::OK); + $result.set_outcome($outcome); + }}; + (ok => $result:ident, $outcome:expr, $val:expr) => {{ + $result.set_value($val); + set_outcome_result!(ok => $result, $outcome) + }}; +} + // Note: if the operation is List or GetCreatedTickets, skip adding it to the map // REQUIRES: assert!(!is_op_running(req_map, op)) // ENSURES: assert!(is_op_running(req_map, op)) (if op_type != List || GetCreatedTickets) @@ -221,21 +238,14 @@ fn setup_curve(s: &Socket, config_dir: &Path, vault: bool) -> BynarResult<()> { fn op_no_disk(responder: &Socket, op: &Operation) -> bool { if !op.has_disk() { match op.get_Op_type() { - Op::Add => error!("Add operation must include disk field. Ignoring request"), - Op::AddPartition => { - error!("Add Partition operation must include disk field. Ignoring request") - } - Op::Remove => error!("Remove operation must include disk field. Ignoring request"), - Op::SafeToRemove => { - error!("Safe to remove operation must include disk field. 
Ignoring request") + Op::Add | Op::AddPartition | Op::Remove | Op::SafeToRemove => { + error!("Add operation must include disk field. Ignoring request") } _ => return false, } // We still have to respond with an error message let mut result = OpOutcomeResult::new(); - result.set_result(ResultType::ERR); - result.set_error_msg("missing operation field in protocol. Ignoring request".to_string()); - + set_outcome_result!(err => result, "missing operation field in protocol. Ignoring request".to_string()); let _ = respond_to_client(&result, &responder); return true; } @@ -277,14 +287,7 @@ fn listen( pool.scope(|s| 'outer: loop { if let Ok(responder) = responder.try_lock() { let now = Instant::now(); - let events = match responder.get_events() { - Err(zmq::Error::EBUSY) => { - trace!("Socket Busy, skip"); - continue; - } - Err(e) => return Err(BynarError::from(e)), - Ok(e) => e as zmq::PollEvents, - }; + let events = poll_events!(responder, continue); // is the socket readable? if events.contains(zmq::PollEvents::POLLIN) { //get the id first {STREAM sockets get messages with id prepended} @@ -318,13 +321,10 @@ fn listen( // check if op is currently running. If so, skip it if op_running!(&req_map, &operation) { trace!("Operation {:?} cannot be run, disk is already running an operation", operation); - //build OpOutcomeResult with SkipRepeat, send to output? let mut op_res = OpOutcomeResult::new(); op_res.set_disk(operation.get_disk().to_string()); - op_res.set_outcome(OpOutcome::SkipRepeat); op_res.set_op_type(operation.get_Op_type()); - op_res.set_result(ResultType::OK); - op_res.set_value(false); + set_outcome_result!(ok => op_res, OpOutcome::SkipRepeat, false); let _ = send_res.send((client_id, op_res)); // this shouldn't error unless the channel breaks continue; } @@ -399,9 +399,7 @@ fn listen( } Ok((OpOutcome::Skipped, val)) => { debug!("Disk skipped"); - result.set_outcome(OpOutcome::Skipped); - result.set_value(val); - result.set_result(ResultType::OK); + set_outcome_result!(ok => result, OpOutcome::Skipped, val); let _ = send_res.send((client_id, result)); } Ok((OpOutcome::SkipRepeat, val)) => { @@ -465,13 +463,11 @@ fn listen( } } } - // send completed requests (or error messages) if events.contains(zmq::PollEvents::POLLOUT) { //check disks first, since those are faster requests than add/remove reqs match recv_disk.try_recv() { Ok((client_id, result)) => { // send result back to client - //send client id back first let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } @@ -479,14 +475,12 @@ fn listen( // check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) match recv_ticket.try_recv() { Ok((client_id, result)) => { - // send result back to client let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } Err(_) => { // no disks in the queue, check if any add/remove/safe-to-remove req results if let Ok((client_id, result)) = recv_res.try_recv() { - // send result back to client //check if result is SkipRepeat, if so, skipp the assert! and insert debug!("Send {:?}", result); if OpOutcome::SkipRepeat != result.get_outcome() { @@ -506,23 +500,18 @@ fn listen( for signal in signals.pending() { match signal as c_int { signal_hook::SIGHUP => { - //Reload the config file - debug!("Reload Config File"); - let config_file = - helpers::load_config(config_dir, "disk-manager.json"); - if let Err(e) = config_file { - error!( - "Failed to load config file {}. 
error: {}", - config_dir.join("disk-manager.json").display(), - e - ); - return Ok(()); - } - let config: DiskManagerConfig = - config_file.expect("Failed to load config"); + // Don't actually need to reload the config, since it gets reloaded on every call to backend... + debug!("Requested to reload config file"); + let config: DiskManagerConfig = match helpers::load_config(&config_dir, "disk-manager.json") { + Ok(p) => p, + Err(e) => { + error!("Failed to load config file {}", e); + continue + } + }; notify_slack( &config, - &"Reload disk-manager config file".to_string(), + &"Requested to reload config, ignoring request: config changes already loaded".to_string(), ) .expect("Unable to connect to slack"); } @@ -556,23 +545,8 @@ fn respond_to_client(result: &T, s: &Socket) -> BynarResul Ok(()) } -macro_rules! set_outcome_result { - (err => $result:ident, $outcome:expr) => {{ - $result.set_result(ResultType::ERR); - $result.set_error_msg($outcome); - }}; - (out =>$result:ident, $outcome:expr) => {{ - $result.set_result(ResultType::OK); - $result.set_outcome($outcome); - }}; - (out => $result:ident, $outcome:expr, $val:expr) => {{ - $result.set_value($val); - set_outcome_result!(out => $result, $outcome) - }}; -} - #[test] -fn test_set_outcome_result(){ +fn test_set_outcome_result() { let mut result = OpOutcomeResult::new(); result.set_disk("/dev/sdc".to_string()); result.set_op_type(Op::Add); @@ -582,12 +556,12 @@ fn test_set_outcome_result(){ result.set_disk("/dev/sdc".to_string()); result.set_op_type(Op::Add); let outcome = OpOutcome::Success; - set_outcome_result!(out => result, outcome); + set_outcome_result!(ok => result, outcome); println!("Success Outcome: {:#?}", result); let mut result = OpOutcomeResult::new(); result.set_disk("/dev/sdc".to_string()); result.set_op_type(Op::Add); - set_outcome_result!(out => result, outcome, true); + set_outcome_result!(ok => result, outcome, true); println!("Success Outcome: {:#?}", result); } @@ -616,7 +590,7 @@ fn add_disk( //Send back OpOutcomeResult match backend.add_disk(&Path::new(d), id, false) { Ok(outcome) => { - set_outcome_result!(out => result, outcome); + set_outcome_result!(ok => result, outcome); } Err(e) => { set_outcome_result!(err => result, e.to_string()); @@ -714,7 +688,7 @@ fn remove_disk( }; match backend.remove_disk(&Path::new(d), false) { Ok(outcome) => { - set_outcome_result!(out => result, outcome); + set_outcome_result!(ok => result, outcome); } Err(e) => { set_outcome_result!(err => result, e.to_string()); @@ -748,7 +722,7 @@ fn safe_to_remove_disk( match safe_to_remove(&Path::new(d), &backend, &config_dir) { Ok((outcome, val)) => { debug!("Safe to remove: {}", val); - set_outcome_result!(out => result, outcome, val); + set_outcome_result!(ok => result, outcome, val); } Err(e) => { debug!("Safe to remove err: {}", e); @@ -771,9 +745,7 @@ pub fn get_jira_tickets( Ok(p) => p, Err(e) => { error!("Failed to load config file {}", e); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - + set_outcome_result!(err => result, e.to_string()); // unable to load config file let _ = sender.send((client_id, result)); return Ok(()); @@ -784,9 +756,7 @@ pub fn get_jira_tickets( Ok(p) => p, Err(e) => { error!("Failed to create database pool {}", e); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - + set_outcome_result!(err => result, e.to_string()); // unable to create DB connection let _ = sender.send((client_id, result)); return Ok(()); From 
3e384cb03843170b60f0996809a48751d0557b59 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 28 Feb 2020 15:33:17 -0500 Subject: [PATCH 57/76] Fixed up returning an error instead of ok, and revamped the listen function and reorganized it for easier comprehension --- src/disk_manager.rs | 273 +++++++++++++++++++++++++++++++++++++++++++- src/main.rs | 7 +- 2 files changed, 268 insertions(+), 12 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 8304391..3891f79 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -1,6 +1,6 @@ use serde_derive::*; -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::fs; use std::fs::{create_dir, read_to_string, File}; use std::io::{Error, ErrorKind, Write}; @@ -275,7 +275,6 @@ fn listen( debug!("Building thread pool"); //Note, for now we are using 16 threads by default let pool = rayon::ThreadPoolBuilder::new().num_threads(16).build()?; - let responder = Arc::new(Mutex::new(responder)); // channel to send results from backend to main thread let (send_res, recv_res) = crossbeam_channel::unbounded::<(Vec, OpOutcomeResult)>(); let (send_disk, recv_disk) = crossbeam_channel::unbounded::<(Vec, Disks)>(); @@ -284,7 +283,268 @@ fn listen( debug!("Create request map"); let mut req_map = create_req_map()?; - pool.scope(|s| 'outer: loop { + let mut messages: VecDeque<(Operation, Vec)> = VecDeque::new(); + loop { + let now = Instant::now(); + let events = poll_events!(responder, continue); + // is the socket readable? + if events.contains(zmq::PollEvents::POLLIN) { + //get the id first {STREAM sockets get messages with id prepended} + let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly + trace!("Client ID {:?}", client_id); + // get actual message + while responder.get_rcvmore()? { + let mut msg = responder.recv_bytes(0)?; + debug!("Got msg len: {}", msg.len()); + trace!("Parsing msg {:?} as hex", msg); + if msg.is_empty() { + continue; + } + while !msg.is_empty() { + let operation = match parse_from_bytes::(&msg.clone()) { + Ok(bytes) => bytes, + Err(e) => { + error!("Failed to parse_from_bytes {:?}. Ignoring request", e); + continue; + } + }; + let size = operation.write_to_bytes()?.len(); + msg.drain((msg.len() - size)..msg.len()); + let client_id = client_id.clone(); + debug!("Operation requested: {:?}", operation.get_Op_type()); + if op_no_disk(&responder, &operation) { + continue; + } + // check if op is currently running. 
If so, skip it + if op_running!(&req_map, &operation) { + trace!( + "Operation {:?} cannot be run, disk is already running an operation", + operation + ); + let mut op_res = OpOutcomeResult::new(); + op_res.set_disk(operation.get_disk().to_string()); + op_res.set_op_type(operation.get_Op_type()); + set_outcome_result!(ok => op_res, OpOutcome::SkipRepeat, false); + let _ = send_res.send((client_id, op_res)); // this shouldn't error unless the channel breaks + continue; + } + op_insert(&mut req_map, &operation); + messages.push_back((operation, client_id)); + } + } + } + if !messages.is_empty() { + for _ in 0..messages.len() { + let (operation, client_id) = messages.pop_front().unwrap(); //this should be safe assuming !empty + let client_id = client_id.clone(); + let (send_res, send_disk, send_ticket) = + (send_res.clone(), send_disk.clone(), send_ticket.clone()); + let backend_type = backend_type.clone(); + let config_dir = config_dir.to_path_buf(); + match operation.get_Op_type() { + Op::Add => { + let id = if operation.has_osd_id() { + Some(operation.get_osd_id()) + } else { + None + }; + pool.spawn(move || { + let disk = operation.get_disk(); + match add_disk( + &send_res, + disk, + &backend_type, + id, + config_dir.to_path_buf(), + client_id, + ) { + Ok(_) => { + info!("Add disk finished"); + } + Err(e) => { + error!("Add disk error: {:?}", e); + } + } + }); + } + Op::AddPartition => { + // + } + Op::List => { + pool.spawn(move || { + match list_disks(&send_disk, client_id) { + Ok(_) => { + info!("List disks finished"); + } + Err(e) => { + error!("List disks error: {:?}", e); + } + }; + }); + } + Op::Remove => { + let mut result = OpOutcomeResult::new(); + result.set_disk(operation.get_disk().to_string()); + result.set_op_type(Op::Remove); + + pool.spawn(move || { + match safe_to_remove( + &Path::new(operation.get_disk()), + &backend_type, + &config_dir, + ) { + Ok((OpOutcome::Success, true)) => { + match remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + &config_dir, + client_id, + ) { + Ok(_) => { + info!("Remove disk finished"); + } + Err(e) => { + error!("Remove disk error: {:?}", e); + } + }; + } + Ok((OpOutcome::Skipped, val)) => { + debug!("Disk skipped"); + set_outcome_result!(ok => result, OpOutcome::Skipped, val); + let _ = send_res.send((client_id, result)); + } + Ok((OpOutcome::SkipRepeat, val)) => { + debug!("Disk skipped, safe to remove already ran"); + result.set_outcome(OpOutcome::SkipRepeat); + result.set_value(val); + result.set_result(ResultType::OK); + let _ = send_res.send((client_id, result)); + } + Ok((_, false)) => { + debug!("Disk is not safe to remove"); + //Response to client + result.set_value(false); + result.set_outcome(OpOutcome::Success); + result.set_result(ResultType::ERR); + result.set_error_msg("Not safe to remove disk".to_string()); + let _ = send_res.send((client_id, result)); + } + Err(e) => { + error!("safe to remove failed: {:?}", e); + // Response to client + result.set_value(false); + result.set_result(ResultType::ERR); + result.set_error_msg(e.to_string()); + let _ = send_res.send((client_id, result)); + } + }; + }); + } + Op::SafeToRemove => { + pool.spawn(move || { + match safe_to_remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + &config_dir, + client_id, + ) { + Ok(_) => { + info!("Safe to remove disk finished"); + } + Err(e) => { + error!("Safe to remove error: {:?}", e); + } + }; + }); + } + Op::GetCreatedTickets => { + match get_jira_tickets(&send_ticket, &config_dir, client_id) { + Ok(_) => { + 
info!("Fetching jira tickets finished"); + } + Err(e) => { + error!("Fetching jira error: {:?}", e); + } + }; + } + } + } + } + if events.contains(zmq::PollEvents::POLLOUT) { + //check disks first, since those are faster requests than add/remove reqs + match recv_disk.try_recv() { + Ok((client_id, result)) => { + // send result back to client + let _ = responder.send(&client_id, zmq::SNDMORE); + let _ = respond_to_client(&result, &responder); + } + Err(_) => { + // check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) + match recv_ticket.try_recv() { + Ok((client_id, result)) => { + let _ = responder.send(&client_id, zmq::SNDMORE); + let _ = respond_to_client(&result, &responder); + } + Err(_) => { + // no disks in the queue, check if any add/remove/safe-to-remove req results + if let Ok((client_id, result)) = recv_res.try_recv() { + //check if result is SkipRepeat, if so, skipp the assert! and insert + debug!("Send {:?}", result); + if OpOutcome::SkipRepeat != result.get_outcome() { + assert!(op_running!(req_map, &result, true)); + req_map.insert(get_op_pathbuf!(&result), None); + // set entry in req_map to None + } + let _ = responder.send(&client_id, zmq::SNDMORE); + let _ = respond_to_client(&result, &responder); + } + } + } + } + } + } + if daemon { + while now.elapsed() < Duration::from_millis(10) { + for signal in signals.pending() { + match signal as c_int { + signal_hook::SIGHUP => { + // Don't actually need to reload the config, since it gets reloaded on every call to backend... + debug!("Requested to reload config file"); + let config: DiskManagerConfig = + match helpers::load_config(&config_dir, "disk-manager.json") { + Ok(p) => p, + Err(e) => { + error!("Failed to load config file {}", e); + continue; + } + }; + notify_slack( + &config, + &"Requested to reload config, ignoring request: config changes already loaded".to_string(), + ) + .expect("Unable to connect to slack"); + } + signal_hook::SIGINT | signal_hook::SIGCHLD => { + //skip this + debug!("Ignore signal"); + continue; + } + signal_hook::SIGTERM => { + //"gracefully" exit + debug!("Exit Process"); + break; + } + _ => unreachable!(), + } + } + } + } else { + std::thread::sleep(Duration::from_millis(10)); + } + } + /*pool.scope(|s| 'outer: loop { if let Ok(responder) = responder.try_lock() { let now = Instant::now(); let events = poll_events!(responder, continue); @@ -329,6 +589,7 @@ fn listen( continue; } op_insert(&mut req_map, &operation); + match operation.get_Op_type() { Op::Add => { let id = if operation.has_osd_id() { @@ -533,7 +794,7 @@ fn listen( std::thread::sleep(Duration::from_millis(10)); } } - })?; + })?;*/ Ok(()) } @@ -571,13 +832,13 @@ fn add_disk( d: &str, backend: &BackendType, id: Option, - config_dir: &Path, + config_dir: PathBuf, client_id: Vec, ) -> BynarResult<()> { let mut result = OpOutcomeResult::new(); result.set_disk(d.to_string()); result.set_op_type(Op::Add); - let backend = match backend::load_backend(backend, Some(config_dir)) { + let backend = match backend::load_backend(backend, Some(&config_dir)) { Ok(backend) => backend, Err(e) => { set_outcome_result!(err => result, e.to_string()); diff --git a/src/main.rs b/src/main.rs index e3831ec..070e15d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -802,12 +802,7 @@ fn handle_operation_result( ); open_jira_ticket(message_map, host_info, pool, config, &dev_path)?; } - //otherwise error.... 
- Err(BynarError::from(format!( - "{} on host {} does not have a currently running operation!", - dev_path.display(), - host_info.hostname - ))) + Ok(()) } Op::Remove => { //check if successful or not and send to slack From f54c7094b3f88fbaee06300e0daccc24817f1894 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 2 Mar 2020 14:58:31 -0500 Subject: [PATCH 58/76] Fix issue with is_all_finished not correctly determining when all operations on a disk have completed (either as successful or failed) --- src/main.rs | 66 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 57 insertions(+), 9 deletions(-) diff --git a/src/main.rs b/src/main.rs index 070e15d..ffd26a6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -658,10 +658,12 @@ fn is_all_finished( let mut all_finished = true; disk.iter().for_each(|(_, v)| { //check if value finished + // if OpOutcome:: Success and OpSafeToRemove, then false + // if OpOutcome:: Success + Op::Remove, is fine? if let Some(val) = v { if let Some(ret) = &val.ret_val { - if !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) + if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) { all_finished = false; } @@ -717,7 +719,7 @@ fn open_jira_ticket( add_or_update_operation_detail(pool, &mut operation_detail)?; } Err(e) => { - let _ = notify_slack(config, &format!("Unable to create ticket {:?}", e)); + let _ = notify_slack(config, &format!("Unable to create ticket {:?} with description:\n {}", e, description)); } } /* @@ -2328,8 +2330,10 @@ mod tests { disk_paths.iter().for_each(|path| { let mut safe_to_rem = OpOutcomeResult::new(); let mut op = Operation::new(); + safe_to_rem.set_result(ResultType::OK); safe_to_rem.set_outcome(OpOutcome::Skipped); if path == &PathBuf::from("/dev/sda") { + safe_to_rem.set_outcome(OpOutcome::Success); safe_to_rem.set_op_type(Op::Remove); op.set_Op_type(Op::Remove); } else { @@ -2347,9 +2351,8 @@ mod tests { //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove - || ret.get_op_type() == Op::Remove)) + if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) { all_finished = false; } @@ -2375,6 +2378,7 @@ mod tests { disk_paths.iter().for_each(|path| { let mut safe_to_rem = OpOutcomeResult::new(); let mut op = Operation::new(); + safe_to_rem.set_result(ResultType::OK); if path == &PathBuf::from("/dev/sda2") { safe_to_rem.set_outcome(OpOutcome::Success); } else { @@ -2398,9 +2402,8 @@ mod tests { //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove - || ret.get_op_type() == Op::Remove)) + if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) { all_finished = false; } @@ -2413,4 +2416,49 @@ mod tests { }); assert!(!all_finished); } + #[test] + // 
test all finished check where disk_map is finished and everything error'd + fn test_all_finished_err() { + let mut disk_map: HashMap> = HashMap::new(); + let mut disk_map: HashMap> = HashMap::new(); + + let disk_paths = + [PathBuf::from("/dev/sda"), PathBuf::from("/dev/sda1"), PathBuf::from("/dev/sda2")] + .to_vec(); + disk_paths.iter().for_each(|path| { + let mut safe_to_rem = OpOutcomeResult::new(); + let mut op = Operation::new(); + safe_to_rem.set_result(ResultType::ERR); + if path == &PathBuf::from("/dev/sda") { + safe_to_rem.set_op_type(Op::Remove); + op.set_Op_type(Op::Remove); + } else { + safe_to_rem.set_op_type(Op::SafeToRemove); + op.set_Op_type(Op::SafeToRemove); + } + let mut disk_op = DiskOp::new(op, None, None); + disk_op.ret_val = Some(safe_to_rem); + disk_map.insert(path.to_path_buf(), Some(disk_op)); + }); + println!("Initial Disk Map: {:#?}", disk_map); + + let mut all_finished = true; + disk_map.iter().for_each(|(k, v)| { + //check if value finished + if let Some(val) = v { + if let Some(ret) = &val.ret_val { + if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) + { + all_finished = false; + } + } else { + all_finished = false; + } + } else { + all_finished = false; + } + }); + assert!(all_finished); + } } From 5070465f82e91a8286c2baf2cb29aceb46cc71dd Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 3 Mar 2020 10:16:46 -0500 Subject: [PATCH 59/76] Update toml's GPT crate --- Cargo.lock | 579 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 280 insertions(+), 301 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 724e704..72b90ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,7 +10,7 @@ name = "aho-corasick" version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -25,8 +25,8 @@ dependencies = [ name = "api" version = "0.1.0" dependencies = [ - "protobuf 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "protobuf-codegen-pure 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "protobuf 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "protobuf-codegen-pure 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -57,8 +57,8 @@ name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -74,12 +74,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "backtrace" -version = "0.3.43" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 
(registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -89,7 +89,7 @@ version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -130,7 +130,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "cexpr 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "clang-sys 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -175,7 +175,7 @@ source = "git+https://github.com/cholcombe973/blkid.git#cbe2d00e62452a5376501b1d dependencies = [ "blkid-sys 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "errno 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -204,7 +204,7 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "nom 4.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "shellscript 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "udev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -239,14 +239,14 @@ dependencies = [ "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "gluster 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "goji 0.2.4 (git+https://github.com/cholcombe973/goji.git)", - "gpt 1.0.0 (git+https://github.com/mzhong1/gpt)", + "gpt 1.0.0 (git+https://github.com/Quyzi/gpt)", "hashicorp_vault 0.6.1 (git+https://github.com/cholcombe973/vault-rs.git)", "hostname 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "init-daemon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "json 0.11.15 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libatasmart 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "libredfish 0.1.0 (git+https://github.com/cholcombe973/libredfish?branch=generic)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "lvm 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -256,7 +256,7 @@ dependencies = [ "pnet 0.23.1 
(registry+https://github.com/rust-lang/crates.io-index)", "postgres 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "postgres-shared 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "protobuf 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "protobuf 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)", "pwd 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "r2d2 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)", "r2d2_postgres 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -265,7 +265,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "signal-hook 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "simplelog 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "slack-hook 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -326,13 +326,13 @@ source = "git+https://github.com/mzhong1/ceph-rust#27194ca6c0d3f4d321c8c04df533d dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", - "nom 5.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 5.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -346,7 +346,7 @@ dependencies = [ [[package]] name = "cfg-if" -version = "0.1.10" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -365,7 +365,7 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -416,7 +416,7 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "publicsuffix 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -428,7 
+428,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -449,7 +449,7 @@ name = "crc32fast" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -457,20 +457,21 @@ name = "crossbeam" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-channel 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-channel" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -484,11 +485,12 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -497,7 +499,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -507,23 +509,16 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-queue" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -531,8 +526,8 @@ name = "crossbeam-queue" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -540,25 +535,16 @@ name = "crossbeam-utils" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-utils" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -577,7 +563,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "boxfnonce 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -603,7 +589,7 @@ name = "dirs" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -613,7 +599,7 @@ name = "dirs" version = "2.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -622,8 +608,8 @@ name = "dirs-sys" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -651,7 +637,7 @@ name = "encoding_rs" version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -672,7 +658,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "errno-dragonfly 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -682,7 +668,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -695,16 +681,16 @@ name = "error-chain" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.41 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "error-chain" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.41 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -712,7 +698,7 @@ name = "failure" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.41 (registry+https://github.com/rust-lang/crates.io-index)", "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -721,9 +707,9 @@ name = "failure_derive" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 
(registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", "synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -747,9 +733,9 @@ name = "flate2" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "miniz_oxide 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -830,8 +816,8 @@ name = "getrandom" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -851,7 +837,7 @@ dependencies = [ "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde-xml-rs 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "unix_socket 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -865,14 +851,14 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gpt" version = "1.0.0" -source = "git+https://github.com/mzhong1/gpt#f6c50d78355fedcb27be71fc9fd00218d63edffb" +source = "git+https://github.com/Quyzi/gpt#62501874dbc7956ca807c9bc3eb177fa1eb5b978" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -890,11 +876,11 @@ dependencies = [ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] 
[[package]] @@ -910,16 +896,16 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "hermit-abi" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -946,7 +932,7 @@ name = "hostname" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winutil 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1003,7 +989,7 @@ dependencies = [ "relay 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1030,12 +1016,12 @@ dependencies = [ "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1048,7 +1034,7 @@ dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ 
-1073,7 +1059,7 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1089,7 +1075,7 @@ name = "iovec" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1131,11 +1117,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lexical-core" -version = "0.4.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "static_assertions 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1155,12 +1142,12 @@ name = "libatasmart-sys" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libc" -version = "0.2.66" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1181,7 +1168,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1189,7 +1176,7 @@ name = "libudev-sys" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1198,7 +1185,7 @@ name = "lock_api" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1214,7 +1201,7 @@ name = "log" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1256,12 +1243,12 @@ name = "memchr" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "memchr" -version = "2.3.0" +version = "2.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1294,7 +1281,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "mime_guess" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1314,12 +1301,12 @@ name = "mio" version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1333,7 +1320,7 @@ version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1361,9 +1348,9 @@ name = "mocktopus_macros" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1372,12 +1359,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.27 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.28 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)", - "schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "schannel 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", "security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1388,8 +1375,8 @@ name = "net2" version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 
0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1399,8 +1386,8 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1411,8 +1398,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1434,17 +1421,17 @@ name = "nom" version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "nom" -version = "5.1.0" +version = "5.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lexical-core 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "lexical-core 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1469,20 +1456,20 @@ name = "num_cpus" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "openssl" -version = "0.10.27" +version = "0.10.28" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 
(registry+https://github.com/rust-lang/crates.io-index)", "openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1498,7 +1485,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1532,9 +1519,9 @@ name = "parking_lot_core" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1546,9 +1533,9 @@ name = "parking_lot_core" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1628,7 +1615,7 @@ version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ipnetwork 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "pnet_base 0.22.0 (registry+https://github.com/rust-lang/crates.io-index)", "pnet_sys 0.25.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1669,7 +1656,7 @@ name = "pnet_sys" version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1679,7 +1666,7 @@ name = "pnet_sys" version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 
0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1689,7 +1676,7 @@ name = "pnet_transport" version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "pnet_base 0.22.0 (registry+https://github.com/rust-lang/crates.io-index)", "pnet_packet 0.23.1 (registry+https://github.com/rust-lang/crates.io-index)", "pnet_sys 0.23.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1752,7 +1739,7 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1760,24 +1747,24 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.10.1" +version = "2.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "protobuf-codegen" -version = "2.10.1" +version = "2.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "protobuf 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "protobuf 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "protobuf-codegen-pure" -version = "2.10.1" +version = "2.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "protobuf 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "protobuf-codegen 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "protobuf 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "protobuf-codegen 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1785,7 +1772,7 @@ name = "publicsuffix" version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1798,7 +1785,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1824,7 +1811,7 @@ name = "quote" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1852,7 +1839,7 @@ name = "rand" version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1862,7 +1849,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fuchsia-cprng 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1874,7 +1861,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1892,7 +1879,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1966,7 +1953,7 @@ name = "rand_jitter" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1978,7 +1965,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2016,9 +2003,9 @@ name = "rayon-core" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2052,15 +2039,15 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "utf8-ranges 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex-syntax" -version = "0.6.14" +version = "0.6.16" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2096,17 +2083,17 @@ dependencies = [ "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2120,7 +2107,7 @@ dependencies = [ "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2158,7 +2145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "schannel" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2185,7 +2172,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "scopeguard" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2195,7 +2182,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core-foundation 0.6.4 
(registry+https://github.com/rust-lang/crates.io-index)", "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2243,14 +2230,14 @@ name = "serde_derive" version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "1.0.46" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2290,7 +2277,7 @@ name = "signal-hook" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2300,7 +2287,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2339,7 +2326,7 @@ dependencies = [ "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2366,8 +2353,8 @@ name = "socket2" version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2411,10 +2398,10 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2432,9 +2419,9 @@ name = "synstructure" version = "0.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2452,7 +2439,7 @@ name = "syntex_errors" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_pos 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2474,7 +2461,7 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_errors 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2502,8 +2489,8 @@ name = "tempfile" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2558,7 +2545,7 @@ name = "time" version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2572,18 +2559,18 @@ dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - 
"tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2598,12 +2585,12 @@ dependencies = [ [[package]] name = "tokio-codec" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2618,43 +2605,43 @@ dependencies = [ "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-current-thread" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-executor" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-fs" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-io" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2675,16 +2662,16 @@ dependencies = [ "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-reactor" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2692,9 +2679,9 @@ dependencies = [ "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2707,7 +2694,7 @@ dependencies = [ [[package]] name = "tokio-sync" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2716,73 +2703,73 @@ dependencies = [ [[package]] name = "tokio-tcp" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 
(registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-threadpool" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-timer" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-udp" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-uds" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.7 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2805,7 +2792,7 @@ name = "try_from" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2818,7 +2805,7 @@ name = "udev" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "libudev-sys 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2827,7 +2814,7 @@ name = "uname" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2884,8 +2871,8 @@ name = "unix_socket" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2927,7 +2914,7 @@ name = "uuid" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2959,11 +2946,6 @@ name = "vec_map" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "version_check" version = "0.9.1" @@ -3004,7 +2986,7 @@ name = "which" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3083,7 +3065,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "zmq-sys 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3093,7 +3075,7 @@ name = "zmq-sys" version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "metadeps 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3108,7 +3090,7 @@ dependencies = [ "checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" "checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" "checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum backtrace 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)" = "7f80256bc78f67e7df7e36d77366f636ed976895d91fe2ab9efa3973e8fe8c4f" +"checksum backtrace 0.3.41 (registry+https://github.com/rust-lang/crates.io-index)" = "a4ed64ae6d9ebfd9893193c4b2532b1292ec97bd8271c9d7d0fa90cd78a34cba" "checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" "checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" @@ -3134,7 +3116,7 @@ dependencies = [ "checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" "checksum ceph 3.2.0 (git+https://github.com/mzhong1/ceph-rust)" = "" "checksum cexpr 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "42aac45e9567d97474a834efdee3081b3c942b2205be932092f53354ce503d6c" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" "checksum chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01" "checksum clang-sys 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d7f7c04e52c35222fffcc3a115b5daf5f7e2bfb71c13c4e2321afe1fc71859c2" "checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" @@ -3147,16 +3129,14 @@ dependencies = [ "checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" "checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" "checksum crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -"checksum crossbeam-channel 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "acec9a3b0b3559f15aee4f90746c4e5e293b701c0f7d3925d24e01645267b68c" +"checksum crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061" "checksum crossbeam-deque 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3" -"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" +"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" "checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150" -"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" -"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" +"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" "checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db" "checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9" -"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" -"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" +"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" "checksum crypto-mac 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0999b4ff4d3446d4ddb19a63e9e00c1876e75cd7000d20e57a693b4b3f08d958" "checksum daemonize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70c24513e34f53b640819f0ac9f705b673fcf4006d7aab8778bee72ebfc89815" "checksum derive-error 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ec098440b29ea3b1ece3e641bac424c19cf996779b623c9e0f2171495425c2c8" @@ -3173,7 +3153,7 @@ dependencies = [ "checksum errno-dragonfly 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" "checksum error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9435d864e017c3c6afeac1654189b06cdb491cf2ff73dbf0d73b0f292f42ff8" "checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" -"checksum error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3ab49e9dcb602294bc42f9a7dfc9bc6e936fca4418ea300dbfb84fe16de0b7d9" +"checksum error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d371106cc88ffdfb1eabd7111e432da544f16f3e2d7bf1dfe8bf575f1df045cd" "checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" "checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" "checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" @@ -3195,10 +3175,10 @@ dependencies = [ "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum gluster 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "74fb74c4d5972b3aa616a9ceacc93f43d8ff9c189fd8530243d3feb32a672d62" "checksum goji 0.2.4 (git+https://github.com/cholcombe973/goji.git)" = "" -"checksum gpt 1.0.0 (git+https://github.com/mzhong1/gpt)" = "" +"checksum gpt 1.0.0 (git+https://github.com/Quyzi/gpt)" = "" "checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" "checksum hashicorp_vault 0.6.1 (git+https://github.com/cholcombe973/vault-rs.git)" = "" -"checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" +"checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" "checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" "checksum hmac 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44f3bdb08579d99d7dc761c0e266f13b5f2ab8c8c703b9fc9ef333cd8f48f55e" @@ -3212,7 +3192,7 @@ dependencies = [ "checksum hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" "checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum indexmap 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b54058f0a6ff80b6803da8faf8997cde53872b38f4023728f6830b06cd3c0dc" +"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" "checksum init-daemon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c403eb960d1fd543b064fd6abbb6990237d6b415ebf9a9732d758c85d3cae51f" "checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" "checksum ipnetwork 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a69dd5e3613374e74da81c251750153abe3bd0ad17641ea63d43d1e21d0dbd4d" @@ -3221,10 +3201,10 @@ dependencies = [ "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum lexical-core 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2304bccb228c4b020f3a4835d247df0a02a7c4686098d4167762cfbbe4c5cb14" +"checksum lexical-core 0.6.7 
(registry+https://github.com/rust-lang/crates.io-index)" = "f86d66d380c9c5a685aaac7a11818bdfa1f733198dfd9ec09c70b762cd12ad6f" "checksum libatasmart 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e4c1c2a1c51b8f754a0962e602cee7d14cf57596ed5ef07720e7e531d5acda6b" "checksum libatasmart-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "deff6b65973033ac013e467b85af14cc7f9484e7cc8f9d3f11ee54db3e17e841" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +"checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" "checksum libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" "checksum libredfish 0.1.0 (git+https://github.com/cholcombe973/libredfish?branch=generic)" = "" "checksum libudev-sys 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3c8469b4a23b962c1396b9b451dda50ef5b283e8dd309d69033475fa9b334324" @@ -3237,12 +3217,12 @@ dependencies = [ "checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" "checksum md5 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "79c56d6a0b07f9e19282511c83fc5b086364cbae4ba8c7d5f190c3d9b0425a48" "checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a" -"checksum memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223" +"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" "checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" "checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" "checksum metadeps 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "73b122901b3a675fac8cecf68dcb2f0d3036193bc861d1ac0e1c337f7d5254c2" "checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -"checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" +"checksum mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" "checksum miniz_oxide 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5" "checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" @@ -3256,11 +3236,11 @@ dependencies = [ "checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" "checksum nom 3.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b" "checksum nom 4.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9c349f68f25f596b9f44cf0e7c69752a5c633b0550c3ff849518bfba0233774a" -"checksum nom 5.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c433f4d505fe6ce7ff78523d2fa13a0b9f2690e181fc26168bcbe5ccc5d14e07" +"checksum nom 5.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6" "checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" "checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" "checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -"checksum openssl 0.10.27 (registry+https://github.com/rust-lang/crates.io-index)" = "e176a45fedd4c990e26580847a525e39e16ec32ac78957dbf62ded31b3abfd6f" +"checksum openssl 0.10.28 (registry+https://github.com/rust-lang/crates.io-index)" = "973293749822d7dd6370d6da1e523b0d1db19f06c459134c658b2a4261378b52" "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" "checksum openssl-sys 0.9.54 (registry+https://github.com/rust-lang/crates.io-index)" = "1024c0a59774200a555087a6da3f253a9095a5f344e353b212ac4c8b8e450986" "checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" @@ -3290,10 +3270,10 @@ dependencies = [ "checksum postgres-shared 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffac35b3e0029b404c24a3b82149b4e904f293e8ca4a327eefa24d3ca50df36f" "checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" "checksum proc-macro2 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "77997c53ae6edd6d187fec07ec41b207063b5ee6f33680e9fa86d405cdd313d4" -"checksum proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" -"checksum protobuf 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6686ddd96a8dbe2687b5f2a687b2cfb520854010ec480f2d74c32e7c9873d3c5" -"checksum protobuf-codegen 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6456421eecf7fc72905868cd760c3e35848ded3552e480cfe67726ed4dbd8d23" -"checksum protobuf-codegen-pure 2.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4a7cb42d5ab6073333be90208ab5ea6ab41c8f6803b35fd773a7572624cc15c9" +"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" +"checksum protobuf 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "37a5325d019a4d837d3abde0a836920f959e33d350f77b5f1e289e061e774942" +"checksum protobuf-codegen 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "64dd3a6192e0c6c1b0dae8f125b7f6b201c39fc487ebda0ee717d7a87fc47dc2" +"checksum protobuf-codegen-pure 2.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"037fa49710ee83b3be232ed53c5fce0bdb1b64c6aa6b1143a86640969c3e4b1d" "checksum publicsuffix 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9bf259a81de2b2eb9850ec990ec78e6a25319715584fd7652b9b26f96fcb1510" "checksum pwd 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5dd32d8bece608e144ca20251e714ed107cdecdabb20c2d383cfc687825106a5" "checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" @@ -3324,7 +3304,7 @@ dependencies = [ "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" "checksum regex 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ee84f70c8c08744ea9641a731c7fadb475bf2ecc52d7f627feb833e0b3990467" -"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" +"checksum regex-syntax 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)" = "1132f845907680735a84409c3bebc64d1364a5683ffbce899550cd09d5eaefc1" "checksum relay 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1576e382688d7e9deecea24417e350d3062d97e32e45d70b1cde65994ff1489a" "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" "checksum reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)" = "f88643aea3c1343c804950d7bf983bd2067f5ab59db6d613a08e05572f2714ab" @@ -3335,11 +3315,11 @@ dependencies = [ "checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" "checksum safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e27a8b19b835f7aea908818e871f5cc3a5a186550c30773be987e155e8163d8f" "checksum safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" -"checksum schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "87f550b06b6cba9c8b8be3ee73f391990116bf527450d2556e9b9ce263b9a021" +"checksum schannel 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "507a9e6e8ffe0a4e0ebb9a10293e62fdf7657c06f1b8bb07a8fcf697d2abf295" "checksum scheduled-thread-pool 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f5de7bc31f28f8e6c28df5e1bf3d10610f5fdc14cc95f272853512c70a2bd779" "checksum scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" "checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" -"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" "checksum security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8ef2429d7cefe5fd28bd1d2ed41c944547d4ff84776f5935b456da44593a16df" "checksum security-framework-sys 
0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e31493fc37615debb8c5090a7aeb4a9730bc61e77ab10b9af59f1a202284f895" "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" @@ -3347,7 +3327,7 @@ dependencies = [ "checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" "checksum serde-xml-rs 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0c06881f4313eec67d4ecfcd8e14339f6042cfc0de4b1bd3ceae74c29d597f68" "checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" -"checksum serde_json 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)" = "21b01d7f0288608a01dca632cf1df859df6fd6ffa885300fc275ce2ba6221953" +"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" "checksum serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "642dd69105886af2efd227f75a520ec9b44a820d65bc133a9131f7d229fd165a" "checksum sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9eb6be24e4c23a84d7184280d2722f7f2731fcdd4a9d886efbfe4413e4847ea0" "checksum shellscript 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "15c0d07fa97f8d209609a1a1549bd886bd907f520f75e1c785783167a66d20c4" @@ -3367,7 +3347,7 @@ dependencies = [ "checksum stringprep 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" "checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" "checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" -"checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" +"checksum syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" "checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" "checksum synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" "checksum syntex 0.42.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0a30b08a6b383a22e5f6edc127d169670d48f905bb00ca79a00ea3e442ebe317" @@ -3385,21 +3365,21 @@ dependencies = [ "checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" "checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" "checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -"checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" +"checksum tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" "checksum tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71" -"checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" -"checksum tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ca6df436c42b0c3330a82d855d2ef017cd793090ad550a6bc2184f4b933532ab" -"checksum tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe6dc22b08d6993916647d108a1a7d15b9cd29c4f4496c62b92c45b5041b7af" -"checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" +"checksum tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" +"checksum tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" +"checksum tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" +"checksum tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" "checksum tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fbb47ae81353c63c487030659494b295f6cb6576242f907f203473b191b0389" -"checksum tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6732fe6b53c8d11178dcb77ac6d9682af27fc6d4cb87789449152e5377377146" +"checksum tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" -"checksum tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "d06554cce1ae4a50f42fba8023918afa931413aded705b560e29600ccf7c6d76" -"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" -"checksum tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c32ffea4827978e9aa392d2f743d973c1dfa3730a2ed3f22ce1e6984da848c" -"checksum tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1739638e364e558128461fc1ad84d997702c8e31c2e6b18fb99842268199e827" -"checksum tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f02298505547f73e60f568359ef0d016d5acd6e830ab9bc7c4a5b3403440121b" -"checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" +"checksum tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" +"checksum tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" +"checksum tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" +"checksum tokio-timer 0.2.13 
(registry+https://github.com/rust-lang/crates.io-index)" = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" +"checksum tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" +"checksum tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798" "checksum toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "736b60249cb25337bc196faa43ee12c705e426f3d55c214d73a4e7be06f92cb4" "checksum try-lock 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee2aa4715743892880f70885373966c83d73ef1b0838a664ef0c76fffd35e7c2" "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" @@ -3425,7 +3405,6 @@ dependencies = [ "checksum uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" "checksum vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" "checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" "checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum want 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a05d9d966753fa4b5c8db73fcab5eed4549cfe0e1e4e66911e5564a0085c35d1" diff --git a/Cargo.toml b/Cargo.toml index 9479762..8951601 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,7 +53,7 @@ dmi = {git = "https://github.com/cholcombe973/dmi"} fstab = "~0.3" gluster = "~1.0" goji = { git = "https://github.com/cholcombe973/goji.git" } -gpt = { git = "https://github.com/mzhong1/gpt"} +gpt = { git = "https://github.com/Quyzi/gpt"} hashicorp_vault = { git = "https://github.com/cholcombe973/vault-rs.git" } hostname = "~0.1" init-daemon = "~0.1" From 4702c9a25e5412ad3ec773e66c0511fbe8eda86f Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 10 Mar 2020 15:11:15 -0400 Subject: [PATCH 60/76] remove unused disk-manager code and update backend --- src/backend/ceph.rs | 2 +- src/client.rs | 2 +- src/disk_manager.rs | 251 -------------------------------------------- 3 files changed, 2 insertions(+), 253 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 2bacf4e..25d4818 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -802,7 +802,7 @@ impl CephBackend { // check if the device path exists (partition may have been deleted) if part1.exists() { // check if the osd_dir is mounted (might not be if partitions have been deleted) - if block_utils::is_mounted(&osd_dir)? 
{ + if let Ok(true) = block_utils::is_mounted(&osd_dir) { // unmount the partition debug!("Unmount {}", part1.display()); block_utils::unmount_device(&part1)?; diff --git a/src/client.rs b/src/client.rs index c7a0b15..484a37b 100644 --- a/src/client.rs +++ b/src/client.rs @@ -80,7 +80,7 @@ fn list_disks(s: &Socket, client_id: Vec) -> BynarResult> { for disk in disks.get_disk() { d.push(disk.clone()); } - println!("disk list: {:?}", d); + trace!("disk list: {:?}", d); return Ok(d); } } diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 3891f79..76c3684 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -544,257 +544,6 @@ fn listen( std::thread::sleep(Duration::from_millis(10)); } } - /*pool.scope(|s| 'outer: loop { - if let Ok(responder) = responder.try_lock() { - let now = Instant::now(); - let events = poll_events!(responder, continue); - // is the socket readable? - if events.contains(zmq::PollEvents::POLLIN) { - //get the id first {STREAM sockets get messages with id prepended} - let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly - trace!("Client ID {:?}", client_id); - // get actual message - while responder.get_rcvmore()? { - let mut msg = responder.recv_bytes(0)?; - debug!("Got msg len: {}", msg.len()); - trace!("Parsing msg {:?} as hex", msg); - if msg.is_empty() { - continue; - } - while !msg.is_empty() { - let operation = match parse_from_bytes::(&msg.clone()) { - Ok(bytes) => bytes, - Err(e) => { - error!("Failed to parse_from_bytes {:?}. Ignoring request", e); - continue; - } - }; - let client_id = client_id.clone(); - let size = operation.write_to_bytes()?.len(); - msg.drain((msg.len() - size)..msg.len()); - let (send_res, send_disk, send_ticket) = (send_res.clone(), send_disk.clone(), send_ticket.clone()); - - debug!("Operation requested: {:?}", operation.get_Op_type()); - if op_no_disk(&responder, &operation) { - continue; - } - // check if op is currently running. 
If so, skip it - if op_running!(&req_map, &operation) { - trace!("Operation {:?} cannot be run, disk is already running an operation", operation); - let mut op_res = OpOutcomeResult::new(); - op_res.set_disk(operation.get_disk().to_string()); - op_res.set_op_type(operation.get_Op_type()); - set_outcome_result!(ok => op_res, OpOutcome::SkipRepeat, false); - let _ = send_res.send((client_id, op_res)); // this shouldn't error unless the channel breaks - continue; - } - op_insert(&mut req_map, &operation); - - match operation.get_Op_type() { - Op::Add => { - let id = if operation.has_osd_id() { - Some(operation.get_osd_id()) - } else { - None - }; - s.spawn(move |_| { - let disk = operation.get_disk(); - match add_disk( - &send_res, - disk, - &backend_type, - id, - config_dir, - client_id, - ) { - Ok(_) => { - info!("Add disk finished"); - } - Err(e) => { - error!("Add disk error: {:?}", e); - } - } - }); - } - Op::AddPartition => { - // - } - Op::List => { - s.spawn(move |_| { - match list_disks(&send_disk, client_id) { - Ok(_) => { - info!("List disks finished"); - } - Err(e) => { - error!("List disks error: {:?}", e); - } - }; - }); - } - Op::Remove => { - let mut result = OpOutcomeResult::new(); - result.set_disk(operation.get_disk().to_string()); - result.set_op_type(Op::Remove); - - s.spawn(move |_| { - match safe_to_remove( - &Path::new(operation.get_disk()), - &backend_type, - config_dir, - ) { - Ok((OpOutcome::Success, true)) => { - match remove_disk( - &send_res, - operation.get_disk(), - &backend_type, - config_dir, - client_id, - ) { - Ok(_) => { - info!("Remove disk finished"); - } - Err(e) => { - error!("Remove disk error: {:?}", e); - } - }; - } - Ok((OpOutcome::Skipped, val)) => { - debug!("Disk skipped"); - set_outcome_result!(ok => result, OpOutcome::Skipped, val); - let _ = send_res.send((client_id, result)); - } - Ok((OpOutcome::SkipRepeat, val)) => { - debug!("Disk skipped, safe to remove already ran"); - result.set_outcome(OpOutcome::SkipRepeat); - result.set_value(val); - result.set_result(ResultType::OK); - let _ = send_res.send((client_id, result)); - } - Ok((_, false)) => { - debug!("Disk is not safe to remove"); - //Response to client - result.set_value(false); - result.set_outcome(OpOutcome::Success); - result.set_result(ResultType::ERR); - result.set_error_msg( - "Not safe to remove disk".to_string(), - ); - let _ = send_res.send((client_id, result)); - } - Err(e) => { - error!("safe to remove failed: {:?}", e); - // Response to client - result.set_value(false); - result.set_result(ResultType::ERR); - result.set_error_msg(e.to_string()); - let _ = send_res.send((client_id, result)); - } - }; - }); - } - Op::SafeToRemove => { - s.spawn(move |_| { - match safe_to_remove_disk( - &send_res, - operation.get_disk(), - &backend_type, - config_dir, - client_id, - ) { - Ok(_) => { - info!("Safe to remove disk finished"); - } - Err(e) => { - error!("Safe to remove error: {:?}", e); - } - }; - }); - } - Op::GetCreatedTickets => { - match get_jira_tickets(&send_ticket, config_dir, client_id) { - Ok(_) => { - info!("Fetching jira tickets finished"); - } - Err(e) => { - error!("Fetching jira error: {:?}", e); - } - }; - } - }; - } - } - } - if events.contains(zmq::PollEvents::POLLOUT) { - //check disks first, since those are faster requests than add/remove reqs - match recv_disk.try_recv() { - Ok((client_id, result)) => { - // send result back to client - let _ = responder.send(&client_id, zmq::SNDMORE); - let _ = respond_to_client(&result, &responder); - } - Err(_) => { - // 
check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) - match recv_ticket.try_recv() { - Ok((client_id, result)) => { - let _ = responder.send(&client_id, zmq::SNDMORE); - let _ = respond_to_client(&result, &responder); - } - Err(_) => { - // no disks in the queue, check if any add/remove/safe-to-remove req results - if let Ok((client_id, result)) = recv_res.try_recv() { - //check if result is SkipRepeat, if so, skipp the assert! and insert - debug!("Send {:?}", result); - if OpOutcome::SkipRepeat != result.get_outcome() { - assert!(op_running!(req_map, &result, true)); - req_map.insert(get_op_pathbuf!(&result), None); // set entry in req_map to None - } - let _ = responder.send(&client_id, zmq::SNDMORE); - let _ = respond_to_client(&result, &responder); - } - } - } - } - } - } - if daemon { - while now.elapsed() < Duration::from_millis(10) { - for signal in signals.pending() { - match signal as c_int { - signal_hook::SIGHUP => { - // Don't actually need to reload the config, since it gets reloaded on every call to backend... - debug!("Requested to reload config file"); - let config: DiskManagerConfig = match helpers::load_config(&config_dir, "disk-manager.json") { - Ok(p) => p, - Err(e) => { - error!("Failed to load config file {}", e); - continue - } - }; - notify_slack( - &config, - &"Requested to reload config, ignoring request: config changes already loaded".to_string(), - ) - .expect("Unable to connect to slack"); - } - signal_hook::SIGINT | signal_hook::SIGCHLD => { - //skip this - debug!("Ignore signal"); - continue; - } - signal_hook::SIGTERM => { - //"gracefully" exit - debug!("Exit Process"); - break 'outer Ok(()); - } - _ => unreachable!(), - } - } - } - } else { - std::thread::sleep(Duration::from_millis(10)); - } - } - })?;*/ Ok(()) } From bbd763f8b0f08aed1644b38f8a2b5f4a10ff4363 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 31 Mar 2020 10:19:15 -0400 Subject: [PATCH 61/76] add comment, small loop sleep, and CentOS package dependencies --- README.md | 4 ++++ src/backend/ceph.rs | 1 + src/disk_manager.rs | 3 ++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b004328..5415d84 100644 --- a/README.md +++ b/README.md @@ -210,6 +210,10 @@ CLI command to install all the dependencies: sudo apt install libzmq3-dev libprotobuf-dev librados2 libatasmart-dev libssl-dev libblkid-dev libudev-dev librados-dev pkg-config libclang-dev llvm libdevmapper-dev liblvm2-dev liblvm2app2.2 gcc clang smartmontools parted ``` +CentOS7 +``` +yum install -y zeromq-devel protobuf-devel librados2 libatasmart-devel openssl-devel libblkid-devel libudev-devel librados2-devel pkgconfig clang-devel zeromq llvm device-mapper-devel gcc clang smartmontools parted +``` ### Working Rust environment diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 25d4818..7b767fd 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -455,6 +455,7 @@ impl CephBackend { ceph_bluestore_tool(&lv_dev_name, &mount_point, simulate)?; let host_info = Host::new()?; + // There are 1,073,741,824 bytes in a gibibyte //let gb_capacity = vg_size / 1_073_741_824; let osd_weight = 0.0; debug!( diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 76c3684..9602195 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -540,8 +540,9 @@ fn listen( } } } - } else { std::thread::sleep(Duration::from_millis(10)); + } else { + std::thread::sleep(Duration::from_millis(100)); } } Ok(()) From d383c9b2244618b240dca3cacd00476b947b3d95 Mon 
Sep 17 00:00:00 2001 From: mzhong820 Date: Tue, 31 Mar 2020 16:49:46 -0400 Subject: [PATCH 62/76] move the smartctl enable as a soft error, since smartctl -H runs regardless --- src/main.rs | 86 +++++++++++++++++++++++++++++++----------------- src/test_disk.rs | 30 ++++++++--------- 2 files changed, 70 insertions(+), 46 deletions(-) diff --git a/src/main.rs b/src/main.rs index ffd26a6..30ba17d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -52,34 +52,39 @@ use std::process; use std::process::Command; use std::time::{Duration, Instant}; -// a specific operation and its outcome +/// a specific operation and its outcome #[derive(Debug, Clone)] struct DiskOp { - pub op_type: Op, // operation type - // the description for a JIRA ticket if necessary (None if not Safe-to-remove/Remove-disk) - // Or, if an add_disk request, description is the ticket_id + /// the operation type + pub op_type: Op, + /// the description for a JIRA ticket if necessary (None if not Safe-to-remove/Remove-disk) + /// Or, if an add_disk request, description is the ticket_id pub description: Option, - pub operation_id: Option, // the operation id if one exists (for safe-to-remove, remove request handling) - pub ret_val: Option, //None if outcome not yet determined + /// the operation id in the database if one exists for Safe-To-Remove/Remove requst handling + pub operation_id: Option, + /// This value is None if the outcome has not yet been recieved + pub ret_val: Option, } impl DiskOp { + /// create a new DiskOp from an operation, description, and operation id pub fn new(op: Operation, description: Option, operation_id: Option) -> DiskOp { DiskOp { op_type: op.get_Op_type(), description, operation_id, ret_val: None } } } -// create a message map to handle list of disk-manager requests +// create a message map to handle a list of disk-manager requests +// The message map is a nested HashMap, mapping a disk to a list of partitions (including the disk path itself), which maps associated operations in progress fn create_msg_map( pool: &Pool, host_mapping: &HostDetailsMapping, ) -> BynarResult>>> { - // List out currently mounted block_devices + // List out currently mounted block_devices, filtering out loop and cd/rom devices let mut devices: Vec = block_utils::get_block_devices()? 
.into_iter() - .filter(|b| { - !(if let Some(p) = b.as_path().file_name() { - (p.to_string_lossy().starts_with("sr") || p.to_string_lossy().starts_with("loop")) + .filter(|block_device| { + !(if let Some(path) = block_device.as_path().file_name() { + (path.to_string_lossy().starts_with("sr") || path.to_string_lossy().starts_with("loop")) } else { true }) @@ -92,19 +97,21 @@ fn create_msg_map( .collect(); let mut map: HashMap>> = HashMap::new(); + // get a list of partition device paths let partitions: Vec = db_devices .clone() .into_iter() - .filter(|p| match block_utils::is_disk(p) { - Err(_) => p.to_string_lossy().chars().last().unwrap().is_digit(10), - Ok(b) => b, + .filter(|path| match block_utils::is_disk(path) { + Err(_) => path.to_string_lossy().chars().last().unwrap().is_digit(10), // check if the last character is a digit (in case the disk is unmounted) + Ok(is_disk) => is_disk, }) .collect(); + // get the list of disk paths from the database devices let mut disks: Vec = db_devices .into_iter() - .filter(|p| match block_utils::is_disk(p) { - Err(_) => !p.to_string_lossy().chars().last().unwrap().is_digit(10), - Ok(b) => b, + .filter(|path| match block_utils::is_disk(path) { + Err(_) => !path.to_string_lossy().chars().last().unwrap().is_digit(10), + Ok(is_disk) => is_disk, }) .collect(); devices.append(&mut disks); @@ -128,8 +135,8 @@ fn create_msg_map( Ok(map) } -// given a path, return a parent-child, or parent-parent tuple to -// look through the request map with, or error +// given a path, return a (parent,child), or (parent,parent) tuple to +// look through the request map with, or error out fn get_request_keys(dev_path: &PathBuf) -> BynarResult<(PathBuf, &PathBuf)> { if let Some(parent) = block_utils::get_parent_devpath_from_path(dev_path)? { Ok((parent, dev_path)) @@ -139,6 +146,7 @@ fn get_request_keys(dev_path: &PathBuf) -> BynarResult<(PathBuf, &PathBuf)> { // partition was destroyed...probably // make parent path let mut str_path = dev_path.to_string_lossy().to_string(); + // device and partition naming conventions have ssd and hard disks end in the partition number for a partition, remove the numbers and you get the disk path while str_path.chars().last().unwrap().is_digit(10) { str_path = str_path[0..str_path.len() - 1].to_string(); } @@ -147,9 +155,9 @@ fn get_request_keys(dev_path: &PathBuf) -> BynarResult<(PathBuf, &PathBuf)> { Ok((path, dev_path)) // partition probably } else if str_path.starts_with("/dev/sd") || str_path.starts_with("/dev/hd") - || str_path.starts_with("/dev/nvme") + || str_path.starts_with("/dev/nvme") //note nvme devices are slightly different in naming convention { - Ok((dev_path.to_path_buf(), dev_path)) // disk...probably + Ok((dev_path.to_path_buf(), dev_path)) // this is the disk path, unless the path is an nvme device } else { // path just doesn't exist, so error... error!("Path {} does not exist, nor does its parent.", dev_path.display()); @@ -662,8 +670,11 @@ fn is_all_finished( // if OpOutcome:: Success + Op::Remove, is fine? 
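// --- Editorial aside (not part of the patch): the negated boolean chain in the
// is_all_finished hunk below is hard to scan. It is equivalent to the positive
// predicate sketched here; the helper name is illustrative, and the types and
// getters (OpOutcomeResult, ResultType, OpOutcome, Op) are taken from the
// surrounding diff, so treat this as a reading aid rather than code in the patch.
fn op_is_finished(ret: &OpOutcomeResult) -> bool {
    // an errored result is final
    ret.get_result() == ResultType::ERR
        // a SafeToRemove/Remove request that did not succeed is also final
        || (ret.get_outcome() != OpOutcome::Success
            && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove))
        // a Remove that succeeded is final
        || (ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove)
}
// is_all_finished then reports true only when every Some(DiskOp) in the disk's
// map carries a ret_val satisfying this predicate. (A later patch in this series
// adds a fourth terminal case: a SafeToRemove that succeeded with value == false.)
// --- end editorial aside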
if let Some(val) = v { if let Some(ret) = &val.ret_val { - if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) + if !(ret.get_result() == ResultType::ERR) + && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove + || ret.get_op_type() == Op::Remove)) + && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) { all_finished = false; } @@ -719,7 +730,10 @@ fn open_jira_ticket( add_or_update_operation_detail(pool, &mut operation_detail)?; } Err(e) => { - let _ = notify_slack(config, &format!("Unable to create ticket {:?} with description:\n {}", e, description)); + let _ = notify_slack( + config, + &format!("Unable to create ticket {:?} with description:\n {}", e, description), + ); } } /* @@ -2351,8 +2365,12 @@ mod tests { //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) + if !(ret.get_result() == ResultType::ERR) + && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove + || ret.get_op_type() == Op::Remove)) + && !(ret.get_outcome() == OpOutcome::Success + && ret.get_op_type() == Op::Remove) { all_finished = false; } @@ -2402,8 +2420,12 @@ mod tests { //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) + if !(ret.get_result() == ResultType::ERR) + && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove + || ret.get_op_type() == Op::Remove)) + && !(ret.get_outcome() == OpOutcome::Success + && ret.get_op_type() == Op::Remove) { all_finished = false; } @@ -2447,8 +2469,12 @@ mod tests { //check if value finished if let Some(val) = v { if let Some(ret) = &val.ret_val { - if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success - && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) + if !(ret.get_result() == ResultType::ERR) + && !(ret.get_outcome() != OpOutcome::Success + && (ret.get_op_type() == Op::SafeToRemove + || ret.get_op_type() == Op::Remove)) + && !(ret.get_outcome() == OpOutcome::Success + && ret.get_op_type() == Op::Remove) { all_finished = false; } diff --git a/src/test_disk.rs b/src/test_disk.rs index c4c4bc2..af2fca8 100644 --- a/src/test_disk.rs +++ b/src/test_disk.rs @@ -1520,24 +1520,22 @@ fn repair_ext(device: &Path) -> BynarResult<()> { fn run_smartctl_check(device: &Path) -> BynarResult { // Enable Smart Scan let out = Command::new("smartctl").args(&["-s", "on", &device.to_string_lossy()]).output()?; - let status = match out.status.code() { + match out.status.code() { Some(code) => match code { // no errors, smart enabled - 0 => { - let out = - Command::new("smartctl").args(&["-H", &device.to_string_lossy()]).output()?; //Run overall health scan - match 
out.status.code() { - Some(code) => match code { - // no errors, health scan successful - 0 => true, - _ => false, - }, - //Process terminated by signal - None => return Err(BynarError::from("smartctl terminated by signal")), - } - } - // could not enable smart checks - _ => return Err(BynarError::from("smartctl could not enable smart checks")), + 0 => trace!("smartctl enabled"), + // could not enable smart checks, should still be able to run smart health checks though + _ => error!("smartctl could not enable smart checks"), + }, + //Process terminated by signal + None => return Err(BynarError::from("smartctl terminated by signal")), + } + let out = Command::new("smartctl").args(&["-H", &device.to_string_lossy()]).output()?; //Run overall health scan + let status = match out.status.code() { + Some(code) => match code { + // no errors, health scan successful + 0 => true, + _ => false, }, //Process terminated by signal None => return Err(BynarError::from("smartctl terminated by signal")), From 103e7bf397260983ce8784f5adf3e38ad5bcaaaa Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 1 Apr 2020 10:10:11 -0400 Subject: [PATCH 63/76] Fix checking if disk operation already done to account for bluestore non-lvm OSDs and fix database ticket update function to use correct column name --- src/backend/ceph.rs | 24 +++++++++++++++++++----- src/in_progress.rs | 2 +- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 7b767fd..0f2cb11 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -1278,8 +1278,9 @@ impl Backend for CephBackend { return Ok(OpOutcome::Skipped); } } + let osd_config = get_osd_config_by_path(&self.config, device)?; // check if the disk is already in the cluster - if is_device_in_cluster(&self.cluster_handle, device)? { + if is_device_in_cluster(&self.cluster_handle, device, osd_config.is_lvm)? { debug!("Device {} is already in the cluster. Skipping", device.display()); return Ok(OpOutcome::SkipRepeat); } @@ -1304,7 +1305,7 @@ impl Backend for CephBackend { let path_check = if !osd_config.is_lvm { get_second_partition(device)? } else { device.to_path_buf() }; // check if the disk is already out of the cluster - if !is_device_in_cluster(&self.cluster_handle, &path_check)? { + if !is_device_in_cluster(&self.cluster_handle, &path_check, osd_config.is_lvm)? { debug!("Device {} is already out of the cluster. Skipping", device.display()); return Ok(OpOutcome::SkipRepeat); } @@ -1378,19 +1379,32 @@ fn get_second_partition(device: &Path) -> BynarResult { } // Check if a device path is already in the cluster -fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path) -> BynarResult { - debug!("Check if device is in cluster"); +fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path, is_lvm: bool) -> BynarResult { + debug!("Check if device {} is in cluster", dev_path.display()); let host = get_hostname().ok_or_else(|| BynarError::from("hostname not found"))?; trace!("Hostname is {:?}", host); - let path = dev_path.to_string_lossy(); + let mut path = dev_path.to_string_lossy().to_string(); let osd_meta = osd_metadata(cluster_handle)?; for osd in osd_meta { match osd.objectstore_meta { ObjectStoreMeta::Bluestore { bluestore_bdev_partition_path, .. 
} => { + if is_lvm{ if bluestore_bdev_partition_path == path && osd.hostname == host { return Ok(true); } } + else{ + // osd metadate with have either /dev/xx1 or /dev/xx2 + while path.chars().last().unwrap().is_digit(10) { + path = path[0..path.len() - 1].to_string(); + } + let path1 = format!("{}1", path); + let path2 = format!("{}2", path); + if (bluestore_bdev_partition_path == path1 || bluestore_bdev_partition_path == path2) && osd.hostname == host{ + return Ok(true); + } + } + } ObjectStoreMeta::Filestore { backend_filestore_partition_path, .. } => { if backend_filestore_partition_path == path && osd.hostname == host { diff --git a/src/in_progress.rs b/src/in_progress.rs index 016db8f..17ee052 100644 --- a/src/in_progress.rs +++ b/src/in_progress.rs @@ -1064,7 +1064,7 @@ pub fn resolve_ticket_in_db(pool: &Pool, ticket_id: &str) -> // TODO[SD]: make sure there is one ticket with this ID let stmt = format!( - "UPDATE operation_details SET status='{}' WHERE ticket_id='{}'", + "UPDATE operation_details SET status='{}' WHERE tracking_id='{}'", OperationStatus::Complete, ticket_id ); From 50ed4461c447c555faf9ace24512b1206be5bd9b Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 2 Apr 2020 14:40:50 -0400 Subject: [PATCH 64/76] Add check in osd metadata for non-lvm bluestore osds, remove SkipRepeat operations once finished running --- src/backend/ceph.rs | 38 ++++++++++++++++++++++---------------- src/disk_manager.rs | 5 +++-- src/main.rs | 44 ++++++++++++++++++++++++++------------------ 3 files changed, 51 insertions(+), 36 deletions(-) diff --git a/src/backend/ceph.rs b/src/backend/ceph.rs index 0f2cb11..b18200a 100644 --- a/src/backend/ceph.rs +++ b/src/backend/ceph.rs @@ -1379,7 +1379,11 @@ fn get_second_partition(device: &Path) -> BynarResult { } // Check if a device path is already in the cluster -fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path, is_lvm: bool) -> BynarResult { +fn is_device_in_cluster( + cluster_handle: &Rados, + dev_path: &Path, + is_lvm: bool, +) -> BynarResult { debug!("Check if device {} is in cluster", dev_path.display()); let host = get_hostname().ok_or_else(|| BynarError::from("hostname not found"))?; trace!("Hostname is {:?}", host); @@ -1388,23 +1392,25 @@ fn is_device_in_cluster(cluster_handle: &Rados, dev_path: &Path, is_lvm: bool) - for osd in osd_meta { match osd.objectstore_meta { ObjectStoreMeta::Bluestore { bluestore_bdev_partition_path, .. } => { - if is_lvm{ - if bluestore_bdev_partition_path == path && osd.hostname == host { - return Ok(true); - } - } - else{ - // osd metadate with have either /dev/xx1 or /dev/xx2 - while path.chars().last().unwrap().is_digit(10) { - path = path[0..path.len() - 1].to_string(); - } - let path1 = format!("{}1", path); - let path2 = format!("{}2", path); - if (bluestore_bdev_partition_path == path1 || bluestore_bdev_partition_path == path2) && osd.hostname == host{ - return Ok(true); + if is_lvm { + if bluestore_bdev_partition_path == path && osd.hostname == host { + return Ok(true); + } + } else { + // osd metadate with have either /dev/xx1 or /dev/xx2 + while path.chars().last().unwrap().is_digit(10) { + path = path[0..path.len() - 1].to_string(); + } + let path1 = format!("{}1", path); + let path2 = format!("{}2", path); + if (bluestore_bdev_partition_path == path1 + || bluestore_bdev_partition_path == path2) + && osd.hostname == host + { + return Ok(true); + } } } - } ObjectStoreMeta::Filestore { backend_filestore_partition_path, .. 
} => { if backend_filestore_partition_path == path && osd.hostname == host { diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 9602195..8a853db 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -321,6 +321,7 @@ fn listen( "Operation {:?} cannot be run, disk is already running an operation", operation ); + trace!("Operations: {:?}", req_map); let mut op_res = OpOutcomeResult::new(); op_res.set_disk(operation.get_disk().to_string()); op_res.set_op_type(operation.get_Op_type()); @@ -494,9 +495,9 @@ fn listen( debug!("Send {:?}", result); if OpOutcome::SkipRepeat != result.get_outcome() { assert!(op_running!(req_map, &result, true)); - req_map.insert(get_op_pathbuf!(&result), None); - // set entry in req_map to None } + // set entry in req_map to None + req_map.insert(get_op_pathbuf!(&result), None); let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } diff --git a/src/main.rs b/src/main.rs index 30ba17d..b6135b6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -61,9 +61,9 @@ struct DiskOp { /// Or, if an add_disk request, description is the ticket_id pub description: Option, /// the operation id in the database if one exists for Safe-To-Remove/Remove requst handling - pub operation_id: Option, + pub operation_id: Option, /// This value is None if the outcome has not yet been recieved - pub ret_val: Option, + pub ret_val: Option, } impl DiskOp { @@ -84,7 +84,8 @@ fn create_msg_map( .into_iter() .filter(|block_device| { !(if let Some(path) = block_device.as_path().file_name() { - (path.to_string_lossy().starts_with("sr") || path.to_string_lossy().starts_with("loop")) + (path.to_string_lossy().starts_with("sr") + || path.to_string_lossy().starts_with("loop")) } else { true }) @@ -106,7 +107,7 @@ fn create_msg_map( Ok(is_disk) => is_disk, }) .collect(); - // get the list of disk paths from the database devices + // get the list of disk paths from the database devices let mut disks: Vec = db_devices .into_iter() .filter(|path| match block_utils::is_disk(path) { @@ -155,7 +156,8 @@ fn get_request_keys(dev_path: &PathBuf) -> BynarResult<(PathBuf, &PathBuf)> { Ok((path, dev_path)) // partition probably } else if str_path.starts_with("/dev/sd") || str_path.starts_with("/dev/hd") - || str_path.starts_with("/dev/nvme") //note nvme devices are slightly different in naming convention + || str_path.starts_with("/dev/nvme") + //note nvme devices are slightly different in naming convention { Ok((dev_path.to_path_buf(), dev_path)) // this is the disk path, unless the path is an nvme device } else { @@ -258,13 +260,14 @@ fn get_disk_map_op( } fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { - let c = config.clone(); - let slack = Slack::new(c.slack_webhook.expect("slack webhook option is None").as_ref())?; - let slack_channel = c.slack_channel.unwrap_or_else(|| "".to_string()); - let bot_name = c.slack_botname.unwrap_or_else(|| "".to_string()); - let p = PayloadBuilder::new().text(msg).channel(slack_channel).username(bot_name).build()?; - - let res = slack.send(&p); + let conf = config.clone(); + let slack = Slack::new(conf.slack_webhook.expect("slack webhook option is None").as_ref())?; + let slack_channel = conf.slack_channel.unwrap_or_else(|| "".to_string()); + let bot_name = conf.slack_botname.unwrap_or_else(|| "".to_string()); + let payload = + PayloadBuilder::new().text(msg).channel(slack_channel).username(bot_name).build()?; + + let res = slack.send(&payload); match res { Ok(_) => debug!("Slack notified"), Err(e) 
=> error!("Slack error: {:?}", e), @@ -664,17 +667,23 @@ fn is_all_finished( // check if all ops in the disk have finished let disk = get_disk_map_op(message_map, &dev_path)?; let mut all_finished = true; - disk.iter().for_each(|(_, v)| { + disk.iter().for_each(|(_partition, operation)| { //check if value finished - // if OpOutcome:: Success and OpSafeToRemove, then false + // if OpOutcome:: Success and OpSafeToRemove, then true + //if safeToRemove Success and false => true // if OpOutcome:: Success + Op::Remove, is fine? - if let Some(val) = v { - if let Some(ret) = &val.ret_val { + if let Some(op) = operation { + if let Some(ret) = &op.ret_val { + //if Err, then its done + // if its safeToRemove Success + false then all_finished is true if !(ret.get_result() == ResultType::ERR) && !(ret.get_outcome() != OpOutcome::Success && (ret.get_op_type() == Op::SafeToRemove || ret.get_op_type() == Op::Remove)) && !(ret.get_outcome() == OpOutcome::Success && ret.get_op_type() == Op::Remove) + && !(ret.get_outcome() == OpOutcome::Success + && !ret.get_value() + && ret.get_op_type() == Op::SafeToRemove) { all_finished = false; } @@ -1289,9 +1298,8 @@ fn main() { } _ => info!("Send and Recieve successfully ran"), }; - debug!("Message Queue after looping {:?}", message_queue); } - debug!("Request Map after looping {:?}", message_map); + trace!("Request Map after looping {:?}", message_map); } debug!("Bynar exited successfully"); notify_slack(&config, &format!("Bynar on host {} has stopped", host_info.hostname)) From e13a72ce5dc07d29545f7d25f8bc05fdbe793089 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 13 Apr 2020 14:58:37 -0400 Subject: [PATCH 65/76] update TOML to use correct block-utils version and vault crates --- Cargo.lock | 22 ++++++++++++++-------- Cargo.toml | 2 +- src/main.rs | 9 +++++++-- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72b90ec..08b0318 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -123,6 +123,11 @@ name = "base64" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "base64" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bindgen" version = "0.40.0" @@ -198,7 +203,7 @@ dependencies = [ [[package]] name = "block-utils" version = "0.6.2" -source = "git+https://github.com/mzhong1/block-utils.git#79c534b33270a07035709c58dae07c3b3b609ad3" +source = "git+https://github.com/mzhong1/block-utils.git#11457007ff4a274d13a40dc515827fd7f3dfb10f" dependencies = [ "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -222,7 +227,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bynar" -version = "0.1.8" +version = "0.1.9" dependencies = [ "api 0.1.0", "blkid 0.2.1 (git+https://github.com/cholcombe973/blkid.git)", @@ -240,7 +245,7 @@ dependencies = [ "gluster 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "goji 0.2.4 (git+https://github.com/cholcombe973/goji.git)", "gpt 1.0.0 (git+https://github.com/Quyzi/gpt)", - "hashicorp_vault 0.6.1 (git+https://github.com/cholcombe973/vault-rs.git)", + "hashicorp_vault 1.0.0 (git+https://github.com/cholcombe973/vault-rs.git)", "hostname 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "init-daemon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "json 0.11.15 (registry+https://github.com/rust-lang/crates.io-index)", @@ 
-885,13 +890,13 @@ dependencies = [ [[package]] name = "hashicorp_vault" -version = "0.6.1" -source = "git+https://github.com/cholcombe973/vault-rs.git#f6cb10222299895a2ccd236bc1a5975a8c69a7f4" +version = "1.0.0" +source = "git+https://github.com/cholcombe973/vault-rs.git#db058913a3bf4ebdb307257a4f11d6c45704ce14" dependencies = [ + "base64 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.11.27 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3094,6 +3099,7 @@ dependencies = [ "checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" "checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" +"checksum base64 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d5ca2cd0adc3f48f9e9ea5a6bbdf9ccc0bfade884847e484d452414c7ccffb3" "checksum base64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "96434f987501f0ed4eb336a411e0631ecd1afa11574fe148587adc4ff96143c9" "checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" "checksum bindgen 0.40.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8f4c4ffe91e0f26bdcc5a8dd58cbf0358ad772b8ec1ae274a11a0ba54ec175f4" @@ -3177,7 +3183,7 @@ dependencies = [ "checksum goji 0.2.4 (git+https://github.com/cholcombe973/goji.git)" = "" "checksum gpt 1.0.0 (git+https://github.com/Quyzi/gpt)" = "" "checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" -"checksum hashicorp_vault 0.6.1 (git+https://github.com/cholcombe973/vault-rs.git)" = "" +"checksum hashicorp_vault 1.0.0 (git+https://github.com/cholcombe973/vault-rs.git)" = "" "checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" "checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" diff --git a/Cargo.toml b/Cargo.toml index 8951601..ab567d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bynar" -version = "0.1.8" +version = "0.1.9" authors = ["Chris Holcombe "] description = "Server remediation as a service" license = "Apache-2.0" diff --git a/src/main.rs b/src/main.rs index b6135b6..129643d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -259,6 +259,7 @@ fn get_disk_map_op( Err(BynarError::from(format!("Path {} is not a disk in the map", dev_path.display()))) } +// 
Send a message to Slack fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { let conf = config.clone(); let slack = Slack::new(conf.slack_webhook.expect("slack webhook option is None").as_ref())?; @@ -275,6 +276,7 @@ fn notify_slack(config: &ConfigSettings, msg: &str) -> BynarResult<()> { Ok(()) } +// get the public key needed to connect to the disk-manager fn get_public_key(config: &ConfigSettings, host_info: &Host) -> BynarResult> { // If vault_endpoint and token are set we should get the key from vault // Otherwise we need to know where the public_key is located? @@ -318,6 +320,8 @@ fn add_disk_to_description( .push_str(&format!("\nDisk vendor: {:?}", state_machine.block_device.scsi_info.vendor)); } +// run the state machine and check for failed disks. +// failed disks are sent to the message queue to check and attempt automatic removal fn check_for_failed_disks( message_map: &mut HashMap>>, message_queue: &mut VecDeque<(Operation, Option, Option)>, @@ -342,7 +346,7 @@ fn check_for_failed_disks( test_disk::check_all_disks(&host_info, pool, host_mapping)?.into_iter().collect(); // separate the states into Ok and Errors let usable_states: Vec = match all_states { - Ok(s) => s, + Ok(state) => state, Err(e) => { error!("check_all_disks failed with error: {:?}", e); return Err(BynarError::new(format!("check_all_disks failed with error: {:?}", e))); @@ -384,7 +388,7 @@ fn check_for_failed_disks( add_or_update_map_op(message_map, &state_machine.block_device.dev_path, None)?; } let disks = get_disk_map_op(message_map, &state_machine.block_device.dev_path)?; - // uh, get list of keys in disks and filter usable list for keypath? + // get list of keys in disks and filter usable list for keypath let mut add: Vec<_> = usable_states .iter() .filter(|state_machine| { @@ -461,6 +465,7 @@ fn check_for_failed_disks( Ok(()) } +// Evaluate the hardware information returned from redfish fn evaluate( results: Vec>, config: &ConfigSettings, From eae5e98de8d8ec47bcd487d457cc7f3caca5c508 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 20 Apr 2020 14:10:59 -0400 Subject: [PATCH 66/76] update goji and arrange function to prevent opening Client without use --- Cargo.lock | 16 +++++----------- Cargo.toml | 6 +++--- src/create_support_ticket.rs | 4 +++- src/main.rs | 6 +++--- src/test_hardware.rs | 9 ++++----- 5 files changed, 18 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08b0318..0182eb7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -227,12 +227,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bynar" -version = "0.1.9" +version = "0.2.0" dependencies = [ "api 0.1.0", "blkid 0.2.1 (git+https://github.com/cholcombe973/blkid.git)", "block-utils 0.6.2 (git+https://github.com/mzhong1/block-utils.git)", - "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "ceph 3.2.0 (git+https://github.com/mzhong1/ceph-rust)", "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -243,7 +243,7 @@ dependencies = [ "dmi 0.1.0 (git+https://github.com/cholcombe973/dmi)", "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "gluster 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "goji 0.2.4 (git+https://github.com/cholcombe973/goji.git)", + "goji 0.2.4 
(git+https://github.com/cholcombe973/goji.git?rev=96aef5ea21d8b5ec3d83209de708fa2e13cd5c96)", "gpt 1.0.0 (git+https://github.com/Quyzi/gpt)", "hashicorp_vault 1.0.0 (git+https://github.com/cholcombe973/vault-rs.git)", "hostname 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -301,11 +301,6 @@ dependencies = [ "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "bytes" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "c2-chacha" version = "0.2.3" @@ -850,7 +845,7 @@ dependencies = [ [[package]] name = "goji" version = "0.2.4" -source = "git+https://github.com/cholcombe973/goji.git#e0006da520c1afb443e1fbc2c201ccaed50345d9" +source = "git+https://github.com/cholcombe973/goji.git?rev=96aef5ea21d8b5ec3d83209de708fa2e13cd5c96#96aef5ea21d8b5ec3d83209de708fa2e13cd5c96" dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3116,7 +3111,6 @@ dependencies = [ "checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40" "checksum byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "94f88df23a25417badc922ab0f5716cc1330e87f71ddd9203b3a3ccd9cedf75d" "checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" "checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" "checksum case 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e88b166b48e29667f5443df64df3c61dc07dc2b1a0b0d231800e07f09a33ecc1" "checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" @@ -3180,7 +3174,7 @@ dependencies = [ "checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum gluster 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "74fb74c4d5972b3aa616a9ceacc93f43d8ff9c189fd8530243d3feb32a672d62" -"checksum goji 0.2.4 (git+https://github.com/cholcombe973/goji.git)" = "" +"checksum goji 0.2.4 (git+https://github.com/cholcombe973/goji.git?rev=96aef5ea21d8b5ec3d83209de708fa2e13cd5c96)" = "" "checksum gpt 1.0.0 (git+https://github.com/Quyzi/gpt)" = "" "checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" "checksum hashicorp_vault 1.0.0 (git+https://github.com/cholcombe973/vault-rs.git)" = "" diff --git a/Cargo.toml b/Cargo.toml index ab567d3..86df9ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bynar" -version = "0.1.9" +version = "0.2.0" authors = ["Chris Holcombe "] description = "Server remediation as a service" license = "Apache-2.0" @@ -52,7 +52,7 @@ dirs = "~2.0" dmi = {git = "https://github.com/cholcombe973/dmi"} fstab = "~0.3" gluster = "~1.0" -goji = { git = 
"https://github.com/cholcombe973/goji.git" } +goji = { git = "https://github.com/cholcombe973/goji.git", rev = "96aef5ea21d8b5ec3d83209de708fa2e13cd5c96" } gpt = { git = "https://github.com/Quyzi/gpt"} hashicorp_vault = { git = "https://github.com/cholcombe973/vault-rs.git" } hostname = "~0.1" @@ -73,7 +73,7 @@ pwd = "~1.3" r2d2 = "~0.8" r2d2_postgres = "~0.14" rayon = "~1.0" -reqwest = "~0.9" +reqwest = {version ="~0.9", features =["native-tls"]} serde = "~1" serde_derive = "~1" serde_json = "~1" diff --git a/src/create_support_ticket.rs b/src/create_support_ticket.rs index 3ee6e84..1fb2000 100644 --- a/src/create_support_ticket.rs +++ b/src/create_support_ticket.rs @@ -13,13 +13,15 @@ pub fn create_support_ticket( ) -> BynarResult { let issue_description = CreateIssue { fields: Fields { - assignee: Assignee { name: settings.jira_ticket_assignee.clone() }, + assignee: Assignee { name: settings.jira_user.clone() }, components: vec![Component { name: "Ceph".into() }], description: description.into(), issuetype: IssueType { id: settings.jira_issue_type.clone() }, priority: Priority { id: settings.jira_priority.clone() }, project: Project { key: settings.jira_project_id.clone() }, summary: title.into(), + reporter: Assignee { name: settings.jira_ticket_assignee.clone() }, + environment: "".to_string(), }, }; let jira: Jira = match settings.proxy { diff --git a/src/main.rs b/src/main.rs index 129643d..fae4992 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1039,7 +1039,7 @@ fn main() { Arg::with_name("time") .help("Time in seconds between Bynar runs") .long("time") - .default_value("5"), + .default_value("60"), ) .get_matches(); @@ -1301,12 +1301,12 @@ fn main() { error!("Send or Receive messages failed with error: {}", e); break 'outer; } - _ => info!("Send and Recieve successfully ran"), + _ => trace!("Send and Recieve successfully ran"), }; } trace!("Request Map after looping {:?}", message_map); } - debug!("Bynar exited successfully"); + info!("Bynar exited successfully"); notify_slack(&config, &format!("Bynar on host {} has stopped", host_info.hostname)) .expect("Unable to connect to slack"); } diff --git a/src/test_hardware.rs b/src/test_hardware.rs index 0af4fbc..68e999e 100644 --- a/src/test_hardware.rs +++ b/src/test_hardware.rs @@ -22,11 +22,6 @@ pub struct HardwareHealthSummary { } fn collect_redfish_info(config: &ConfigSettings) -> BynarResult { - let client = Client::builder() - .danger_accept_invalid_certs(true) - .danger_accept_invalid_hostnames(true) - .build()?; - if config.redfish_ip.is_none() { debug!("Redfish ip address not specified. 
Skipping checks"); return Ok(HardwareHealthSummary { @@ -38,6 +33,10 @@ fn collect_redfish_info(config: &ConfigSettings) -> BynarResult Date: Mon, 20 Apr 2020 15:19:47 -0400 Subject: [PATCH 67/76] Sleep the poll_events so it doesn't spin --- src/disk_manager.rs | 3 ++- src/util.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 8a853db..0050e51 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -298,7 +298,7 @@ fn listen( debug!("Got msg len: {}", msg.len()); trace!("Parsing msg {:?} as hex", msg); if msg.is_empty() { - continue; + continue; // its the ID message, so skip } while !msg.is_empty() { let operation = match parse_from_bytes::(&msg.clone()) { @@ -545,6 +545,7 @@ fn listen( } else { std::thread::sleep(Duration::from_millis(100)); } + std::thread::sleep(Duration::from_millis(100)); } Ok(()) } diff --git a/src/util.rs b/src/util.rs index 5d862d7..f054df6 100644 --- a/src/util.rs +++ b/src/util.rs @@ -27,6 +27,7 @@ macro_rules! poll_events { match $s.get_events() { Err(zmq::Error::EBUSY) => { debug!("Socket Busy, skip"); + std::thread::sleep(std::time::Duration::from_millis(100)); $ret; } Err(e) => { From 9bfc05ad450da270dcb35e138cba83b02a2b2f60 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Wed, 22 Apr 2020 12:52:49 -0400 Subject: [PATCH 68/76] fix the skip signal handling issue and add sleep to loop --- src/disk_manager.rs | 303 +++++++++++++++++++++++--------------------- src/main.rs | 2 +- 2 files changed, 158 insertions(+), 147 deletions(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index 0050e51..f7aae2a 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -284,111 +284,112 @@ fn listen( debug!("Create request map"); let mut req_map = create_req_map()?; let mut messages: VecDeque<(Operation, Vec)> = VecDeque::new(); - loop { + 'outer: loop { let now = Instant::now(); - let events = poll_events!(responder, continue); - // is the socket readable? - if events.contains(zmq::PollEvents::POLLIN) { - //get the id first {STREAM sockets get messages with id prepended} - let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly - trace!("Client ID {:?}", client_id); - // get actual message - while responder.get_rcvmore()? { - let mut msg = responder.recv_bytes(0)?; - debug!("Got msg len: {}", msg.len()); - trace!("Parsing msg {:?} as hex", msg); - if msg.is_empty() { - continue; // its the ID message, so skip - } - while !msg.is_empty() { - let operation = match parse_from_bytes::(&msg.clone()) { - Ok(bytes) => bytes, - Err(e) => { - error!("Failed to parse_from_bytes {:?}. Ignoring request", e); - continue; + match responder.get_events() { + Ok(events) => { + // is the socket readable? + if events.contains(zmq::PollEvents::POLLIN) { + //get the id first {STREAM sockets get messages with id prepended} + let client_id = responder.recv_bytes(0)?; //leave as Vec, not utf8 friendly + trace!("Client ID {:?}", client_id); + // get actual message + while responder.get_rcvmore()? { + let mut msg = responder.recv_bytes(0)?; + debug!("Got msg len: {}", msg.len()); + trace!("Parsing msg {:?} as hex", msg); + if msg.is_empty() { + continue; // its the ID message, so skip } - }; - let size = operation.write_to_bytes()?.len(); - msg.drain((msg.len() - size)..msg.len()); - let client_id = client_id.clone(); - debug!("Operation requested: {:?}", operation.get_Op_type()); - if op_no_disk(&responder, &operation) { - continue; - } - // check if op is currently running. 
If so, skip it - if op_running!(&req_map, &operation) { - trace!( + while !msg.is_empty() { + let operation = match parse_from_bytes::(&msg.clone()) { + Ok(bytes) => bytes, + Err(e) => { + error!("Failed to parse_from_bytes {:?}. Ignoring request", e); + continue; + } + }; + let size = operation.write_to_bytes()?.len(); + msg.drain((msg.len() - size)..msg.len()); + let client_id = client_id.clone(); + debug!("Operation requested: {:?}", operation.get_Op_type()); + if op_no_disk(&responder, &operation) { + continue; + } + // check if op is currently running. If so, skip it + if op_running!(&req_map, &operation) { + trace!( "Operation {:?} cannot be run, disk is already running an operation", operation ); - trace!("Operations: {:?}", req_map); - let mut op_res = OpOutcomeResult::new(); - op_res.set_disk(operation.get_disk().to_string()); - op_res.set_op_type(operation.get_Op_type()); - set_outcome_result!(ok => op_res, OpOutcome::SkipRepeat, false); - let _ = send_res.send((client_id, op_res)); // this shouldn't error unless the channel breaks - continue; + trace!("Operations: {:?}", req_map); + let mut op_res = OpOutcomeResult::new(); + op_res.set_disk(operation.get_disk().to_string()); + op_res.set_op_type(operation.get_Op_type()); + set_outcome_result!(ok => op_res, OpOutcome::SkipRepeat, false); + let _ = send_res.send((client_id, op_res)); // this shouldn't error unless the channel breaks + continue; + } + op_insert(&mut req_map, &operation); + messages.push_back((operation, client_id)); + } } - op_insert(&mut req_map, &operation); - messages.push_back((operation, client_id)); } - } - } - if !messages.is_empty() { - for _ in 0..messages.len() { - let (operation, client_id) = messages.pop_front().unwrap(); //this should be safe assuming !empty - let client_id = client_id.clone(); - let (send_res, send_disk, send_ticket) = - (send_res.clone(), send_disk.clone(), send_ticket.clone()); - let backend_type = backend_type.clone(); - let config_dir = config_dir.to_path_buf(); - match operation.get_Op_type() { - Op::Add => { - let id = if operation.has_osd_id() { - Some(operation.get_osd_id()) - } else { - None - }; - pool.spawn(move || { - let disk = operation.get_disk(); - match add_disk( - &send_res, - disk, - &backend_type, - id, - config_dir.to_path_buf(), - client_id, - ) { - Ok(_) => { - info!("Add disk finished"); - } - Err(e) => { - error!("Add disk error: {:?}", e); - } + if !messages.is_empty() { + for _ in 0..messages.len() { + let (operation, client_id) = messages.pop_front().unwrap(); //this should be safe assuming !empty + let client_id = client_id.clone(); + let (send_res, send_disk, send_ticket) = + (send_res.clone(), send_disk.clone(), send_ticket.clone()); + let backend_type = backend_type.clone(); + let config_dir = config_dir.to_path_buf(); + match operation.get_Op_type() { + Op::Add => { + let id = if operation.has_osd_id() { + Some(operation.get_osd_id()) + } else { + None + }; + pool.spawn(move || { + let disk = operation.get_disk(); + match add_disk( + &send_res, + disk, + &backend_type, + id, + config_dir.to_path_buf(), + client_id, + ) { + Ok(_) => { + info!("Add disk finished"); + } + Err(e) => { + error!("Add disk error: {:?}", e); + } + } + }); } - }); - } - Op::AddPartition => { - // - } - Op::List => { - pool.spawn(move || { - match list_disks(&send_disk, client_id) { - Ok(_) => { - info!("List disks finished"); - } - Err(e) => { - error!("List disks error: {:?}", e); - } - }; - }); - } - Op::Remove => { - let mut result = OpOutcomeResult::new(); - 
result.set_disk(operation.get_disk().to_string()); - result.set_op_type(Op::Remove); + Op::AddPartition => { + // + } + Op::List => { + pool.spawn(move || { + match list_disks(&send_disk, client_id) { + Ok(_) => { + info!("List disks finished"); + } + Err(e) => { + error!("List disks error: {:?}", e); + } + }; + }); + } + Op::Remove => { + let mut result = OpOutcomeResult::new(); + result.set_disk(operation.get_disk().to_string()); + result.set_op_type(Op::Remove); - pool.spawn(move || { + pool.spawn(move || { match safe_to_remove( &Path::new(operation.get_disk()), &backend_type, @@ -441,71 +442,81 @@ fn listen( } }; }); - } - Op::SafeToRemove => { - pool.spawn(move || { - match safe_to_remove_disk( - &send_res, - operation.get_disk(), - &backend_type, - &config_dir, - client_id, - ) { - Ok(_) => { - info!("Safe to remove disk finished"); - } - Err(e) => { - error!("Safe to remove error: {:?}", e); - } - }; - }); - } - Op::GetCreatedTickets => { - match get_jira_tickets(&send_ticket, &config_dir, client_id) { - Ok(_) => { - info!("Fetching jira tickets finished"); } - Err(e) => { - error!("Fetching jira error: {:?}", e); + Op::SafeToRemove => { + pool.spawn(move || { + match safe_to_remove_disk( + &send_res, + operation.get_disk(), + &backend_type, + &config_dir, + client_id, + ) { + Ok(_) => { + info!("Safe to remove disk finished"); + } + Err(e) => { + error!("Safe to remove error: {:?}", e); + } + }; + }); + } + Op::GetCreatedTickets => { + match get_jira_tickets(&send_ticket, &config_dir, client_id) { + Ok(_) => { + info!("Fetching jira tickets finished"); + } + Err(e) => { + error!("Fetching jira error: {:?}", e); + } + }; } - }; + } } } - } - } - if events.contains(zmq::PollEvents::POLLOUT) { - //check disks first, since those are faster requests than add/remove reqs - match recv_disk.try_recv() { - Ok((client_id, result)) => { - // send result back to client - let _ = responder.send(&client_id, zmq::SNDMORE); - let _ = respond_to_client(&result, &responder); - } - Err(_) => { - // check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) - match recv_ticket.try_recv() { + if events.contains(zmq::PollEvents::POLLOUT) { + //check disks first, since those are faster requests than add/remove reqs + match recv_disk.try_recv() { Ok((client_id, result)) => { + // send result back to client let _ = responder.send(&client_id, zmq::SNDMORE); let _ = respond_to_client(&result, &responder); } Err(_) => { - // no disks in the queue, check if any add/remove/safe-to-remove req results - if let Ok((client_id, result)) = recv_res.try_recv() { - //check if result is SkipRepeat, if so, skipp the assert! and insert - debug!("Send {:?}", result); - if OpOutcome::SkipRepeat != result.get_outcome() { - assert!(op_running!(req_map, &result, true)); + // check if there are tickets (also takes a while, but not as long as add/remove/safe-to-remove) + match recv_ticket.try_recv() { + Ok((client_id, result)) => { + let _ = responder.send(&client_id, zmq::SNDMORE); + let _ = respond_to_client(&result, &responder); + } + Err(_) => { + // no disks in the queue, check if any add/remove/safe-to-remove req results + if let Ok((client_id, result)) = recv_res.try_recv() { + //check if result is SkipRepeat, if so, skipp the assert! 
and insert + debug!("Send {:?}", result); + if OpOutcome::SkipRepeat != result.get_outcome() { + assert!(op_running!(req_map, &result, true)); + } + // set entry in req_map to None + req_map.insert(get_op_pathbuf!(&result), None); + let _ = responder.send(&client_id, zmq::SNDMORE); + let _ = respond_to_client(&result, &responder); + } } - // set entry in req_map to None - req_map.insert(get_op_pathbuf!(&result), None); - let _ = responder.send(&client_id, zmq::SNDMORE); - let _ = respond_to_client(&result, &responder); } } } } } + Err(zmq::Error::EBUSY) => { + debug!("Socket Busy, skip"); + } + Err(e) => { + error!("Get Client Socket Events errored...{:?}", e); + return Err(BynarError::from(e)); + } } + if daemon { while now.elapsed() < Duration::from_millis(10) { for signal in signals.pending() { @@ -535,7 +546,7 @@ fn listen( signal_hook::SIGTERM => { //"gracefully" exit debug!("Exit Process"); - break; + break 'outer; } _ => unreachable!(), } diff --git a/src/main.rs b/src/main.rs index fae4992..c2de603 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1268,7 +1268,7 @@ fn main() { ), ) .expect("Unable to connect to slack"); - return; + continue; } config = config_file.expect("Failed to load config"); } From 84135635fc0366f8773c38970a77bdd93cd2bf96 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 23 Apr 2020 11:47:09 -0400 Subject: [PATCH 69/76] remove extraneous sleep --- src/util.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/util.rs b/src/util.rs index f054df6..ba9cf6a 100644 --- a/src/util.rs +++ b/src/util.rs @@ -26,8 +26,7 @@ macro_rules! poll_events { ($s:expr, $ret:expr) => { match $s.get_events() { Err(zmq::Error::EBUSY) => { - debug!("Socket Busy, skip"); - std::thread::sleep(std::time::Duration::from_millis(100)); + trace!("Socket Busy, skip"); $ret; } Err(e) => { From 0d6dc3f986637e32f907301850fdf63add8cc850 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 23 Apr 2020 14:53:09 -0400 Subject: [PATCH 70/76] add explicit sleep to macro for polling events --- src/client.rs | 8 ++++---- src/util.rs | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/client.rs b/src/client.rs index 484a37b..6ede37e 100644 --- a/src/client.rs +++ b/src/client.rs @@ -32,7 +32,7 @@ fn add_disk( let mut sent = false; //loop until socket is readable, then get the response loop { - let events = poll_events!(s, continue); + let events = poll_events!(s, continue, 5000); //check if writable before sending request if events.contains(zmq::PollEvents::POLLOUT) && !sent { helpers::add_disk_request(s, path, id, client_id.clone(), simulate)?; @@ -65,7 +65,7 @@ fn list_disks(s: &Socket, client_id: Vec) -> BynarResult> { //loop until socket is readable, then get the response let mut sent = false; loop { - let events = poll_events!(s, continue); + let events = poll_events!(s, continue, 100); //check if writable before sending request if events.contains(zmq::PollEvents::POLLOUT) && !sent { helpers::list_disks_request(s, client_id.clone())?; @@ -98,7 +98,7 @@ fn remove_disk( //loop until socket is readable, then get the response loop { - let events = poll_events!(s, continue); + let events = poll_events!(s, continue, 1000); //check if writable before sending request if events.contains(zmq::PollEvents::POLLOUT) && !sent { helpers::remove_disk_request(s, path, id, client_id.clone(), simulate)?; @@ -173,7 +173,7 @@ fn handle_jira_tickets(s: &Socket, client_id: Vec) -> BynarResult<()> { let mut sent = false; //loop until socket is readable, then get the 
response loop { - let events = poll_events!(s, continue); + let events = poll_events!(s, continue, 1000); //check if writable before sending request if events.contains(zmq::PollEvents::POLLOUT) && !sent { helpers::get_jira_tickets(s, client_id.clone())?; diff --git a/src/util.rs b/src/util.rs index ba9cf6a..20b1ecb 100644 --- a/src/util.rs +++ b/src/util.rs @@ -36,6 +36,20 @@ macro_rules! poll_events { Ok(e) => e, } }; + ($s:expr, $ret:expr, $sleep:expr) => { + match $s.get_events() { + Err(zmq::Error::EBUSY) => { + trace!("Socket Busy, skip"); + std::thread::sleep(std::time::Duration::from_millis($sleep)); + $ret; + } + Err(e) => { + error!("Get Client Socket Events errored...{:?}", e); + return Err(BynarError::from(e)); + } + Ok(e) => e, + } + }; } #[macro_export] From a43e44418785cdf24e2f9519dbff912780fd2ce0 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Mon, 4 May 2020 14:41:00 -0400 Subject: [PATCH 71/76] Fix the fd leak --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- src/test_disk.rs | 14 +++++++------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0182eb7..fb64548 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ "init-daemon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "json 0.11.15 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libatasmart 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "libatasmart 0.1.5 (git+https://github.com/mzhong1/libatasmart-1.git)", "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "libredfish 0.1.0 (git+https://github.com/cholcombe973/libredfish?branch=generic)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "libatasmart" version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" +source = "git+https://github.com/mzhong1/libatasmart-1.git#9e53ce34c637a97e677301bf35e5c50259adbe20" dependencies = [ "libatasmart-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3202,7 +3202,7 @@ dependencies = [ "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" "checksum lexical-core 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f86d66d380c9c5a685aaac7a11818bdfa1f733198dfd9ec09c70b762cd12ad6f" -"checksum libatasmart 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e4c1c2a1c51b8f754a0962e602cee7d14cf57596ed5ef07720e7e531d5acda6b" +"checksum libatasmart 0.1.5 (git+https://github.com/mzhong1/libatasmart-1.git)" = "" "checksum libatasmart-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "deff6b65973033ac013e467b85af14cc7f9484e7cc8f9d3f11ee54db3e17e841" "checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" "checksum libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" diff --git a/Cargo.toml b/Cargo.toml index 86df9ad..9c1f870 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 
+58,7 @@ hashicorp_vault = { git = "https://github.com/cholcombe973/vault-rs.git" } hostname = "~0.1" init-daemon = "~0.1" json = "~0.11" -libatasmart = "~0.1" +libatasmart = {git = "https://github.com/mzhong1/libatasmart-1.git"}#"~0.1" libc = "~0.2" libredfish = {git = "https://github.com/cholcombe973/libredfish", branch = "generic"} log = "~0.4" diff --git a/src/test_disk.rs b/src/test_disk.rs index af2fca8..bbe8d6f 100644 --- a/src/test_disk.rs +++ b/src/test_disk.rs @@ -735,7 +735,7 @@ impl Transition for Scan { debug!("thread {} running Scan transition", process::id()); let raid_backed = is_raid_backed(&scsi_info); match (raid_backed.0, raid_backed.1) { - (false, _) => match run_smart_checks(&Path::new(&device.dev_path)) { + (false, _) => match run_smart_checks(&device.dev_path) { Ok(stat) => { device.smart_passed = stat; // If the device is a Disk, and is not mounted then end the state machine here. @@ -753,7 +753,7 @@ impl Transition for Scan { to_state } Err(e) => { - error!("Smart test failed: {:?}", e); + debug!("Smart test failed: {:?}", e); State::Fail } }, @@ -1525,7 +1525,7 @@ fn run_smartctl_check(device: &Path) -> BynarResult { // no errors, smart enabled 0 => trace!("smartctl enabled"), // could not enable smart checks, should still be able to run smart health checks though - _ => error!("smartctl could not enable smart checks"), + _ => debug!("smartctl could not enable smart checks"), }, //Process terminated by signal None => return Err(BynarError::from("smartctl terminated by signal")), @@ -1551,16 +1551,16 @@ fn run_smart_checks(device: &Path) -> BynarResult { match smart.get_smart_status() { Ok(stat) => stat, Err(e) => { - error!("Error {:?} Run SmartMonTools", e); + debug!("Error {:?} Run SmartMonTools", e); // If ata smart fails, run smartmontools - return run_smartctl_check(device); + run_smartctl_check(device)? } } } Err(e) => { - error!("Error {:?} Run SmartMonTools", e); + debug!("Error {:?} Run SmartMonTools", e); // If ata smart fails, run smartmontools - return run_smartctl_check(device); + run_smartctl_check(device)? 
} }; From af492acc55b0278689fbf5be7be5152ece99f25f Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 7 May 2020 16:42:09 -0400 Subject: [PATCH 72/76] Update libatasmart --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb64548..26d4e2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ "init-daemon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "json 0.11.15 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libatasmart 0.1.5 (git+https://github.com/mzhong1/libatasmart-1.git)", + "libatasmart 0.1.5 (git+https://github.com/cholcombe973/libatasmart.git)", "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)", "libredfish 0.1.0 (git+https://github.com/cholcombe973/libredfish?branch=generic)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "libatasmart" version = "0.1.5" -source = "git+https://github.com/mzhong1/libatasmart-1.git#9e53ce34c637a97e677301bf35e5c50259adbe20" +source = "git+https://github.com/cholcombe973/libatasmart.git#f95e055885ec7d66ecd9b36bb5807e6f40d2194a" dependencies = [ "libatasmart-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3202,7 +3202,7 @@ dependencies = [ "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" "checksum lexical-core 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f86d66d380c9c5a685aaac7a11818bdfa1f733198dfd9ec09c70b762cd12ad6f" -"checksum libatasmart 0.1.5 (git+https://github.com/mzhong1/libatasmart-1.git)" = "" +"checksum libatasmart 0.1.5 (git+https://github.com/cholcombe973/libatasmart.git)" = "" "checksum libatasmart-sys 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "deff6b65973033ac013e467b85af14cc7f9484e7cc8f9d3f11ee54db3e17e841" "checksum libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" "checksum libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" diff --git a/Cargo.toml b/Cargo.toml index 9c1f870..523562b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,7 @@ hashicorp_vault = { git = "https://github.com/cholcombe973/vault-rs.git" } hostname = "~0.1" init-daemon = "~0.1" json = "~0.11" -libatasmart = {git = "https://github.com/mzhong1/libatasmart-1.git"}#"~0.1" +libatasmart = {git = "https://github.com/cholcombe973/libatasmart.git"}#"~0.1" libc = "~0.2" libredfish = {git = "https://github.com/cholcombe973/libredfish", branch = "generic"} log = "~0.4" From 73b2ea6052ede523df8ea9c6fe987b2f490ea9bc Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 7 May 2020 16:51:32 -0400 Subject: [PATCH 73/76] update ceph crates --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 26d4e2a..d4a01fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -202,8 +202,8 @@ dependencies = [ [[package]] name = 
"block-utils" -version = "0.6.2" -source = "git+https://github.com/mzhong1/block-utils.git#11457007ff4a274d13a40dc515827fd7f3dfb10f" +version = "0.7.0" +source = "git+https://github.com/cholcombe973/block-utils.git#17cf8e2c7ed026396dd0a0d44b993b4c056cf95e" dependencies = [ "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -231,9 +231,9 @@ version = "0.2.0" dependencies = [ "api 0.1.0", "blkid 0.2.1 (git+https://github.com/cholcombe973/blkid.git)", - "block-utils 0.6.2 (git+https://github.com/mzhong1/block-utils.git)", + "block-utils 0.7.0 (git+https://github.com/cholcombe973/block-utils.git)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "ceph 3.2.0 (git+https://github.com/mzhong1/ceph-rust)", + "ceph 3.2.4 (git+https://github.com/ceph/ceph-rust.git)", "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -321,8 +321,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ceph" -version = "3.2.0" -source = "git+https://github.com/mzhong1/ceph-rust#27194ca6c0d3f4d321c8c04df533d4bf69a2f9ea" +version = "3.2.4" +source = "git+https://github.com/ceph/ceph-rust.git#f576686178f0b6fc458785c3b5e887c11d8d22c8" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3105,7 +3105,7 @@ dependencies = [ "checksum blkid 0.2.1 (git+https://github.com/cholcombe973/blkid.git)" = "" "checksum blkid-sys 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "087ef4e32044243ce0b523b4feebfa69bdccce71ab1f88ba702812c956cd7588" "checksum block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab" -"checksum block-utils 0.6.2 (git+https://github.com/mzhong1/block-utils.git)" = "" +"checksum block-utils 0.7.0 (git+https://github.com/cholcombe973/block-utils.git)" = "" "checksum boxfnonce 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5988cb1d626264ac94100be357308f29ff7cbdd3b36bda27f450a4ee3f713426" "checksum build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39" "checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40" @@ -3114,7 +3114,7 @@ dependencies = [ "checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" "checksum case 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e88b166b48e29667f5443df64df3c61dc07dc2b1a0b0d231800e07f09a33ecc1" "checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" -"checksum ceph 3.2.0 (git+https://github.com/mzhong1/ceph-rust)" = "" +"checksum ceph 3.2.4 (git+https://github.com/ceph/ceph-rust.git)" = "" "checksum cexpr 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "42aac45e9567d97474a834efdee3081b3c942b2205be932092f53354ce503d6c" "checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" "checksum chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01" diff --git a/Cargo.toml b/Cargo.toml index 523562b..17b78db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,9 +40,9 @@ rand = "~0.7" [dependencies] api = { path = "api" } blkid = {git = "https://github.com/cholcombe973/blkid.git"}#"~0.2" -block-utils = {git = "https://github.com/mzhong1/block-utils.git"}#"~0.6" +block-utils = {git = "https://github.com/cholcombe973/block-utils.git"}#"~0.6" bytes = "*" -ceph = {git = "https://github.com/mzhong1/ceph-rust"}#"~3.0" +ceph = {git = "https://github.com/ceph/ceph-rust.git"}#"~3.0" chrono = "~0.4" clap = "~2" crossbeam = "~0.7" From fa685d239704bd5f57cf7bfd0bb0fa116ea48573 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Thu, 14 May 2020 11:14:19 -0400 Subject: [PATCH 74/76] Fix soft error printing so common errors are changed to debug to prevent exploding log sizes --- src/disk_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/disk_manager.rs b/src/disk_manager.rs index f7aae2a..27c26f8 100644 --- a/src/disk_manager.rs +++ b/src/disk_manager.rs @@ -305,7 +305,7 @@ fn listen( let operation = match parse_from_bytes::(&msg.clone()) { Ok(bytes) => bytes, Err(e) => { - error!("Failed to parse_from_bytes {:?}. Ignoring request", e); + debug!("Failed to parse_from_bytes {:?}. Ignoring request", e); continue; } }; From 82e5276d9e0ef8adf96ba8a8c1018f631c3710f3 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 12 Jun 2020 10:02:53 -0400 Subject: [PATCH 75/76] fix JIRA crate version --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- src/create_support_ticket.rs | 22 +++++++++++++++------- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4a01fa..4b94688 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -243,7 +243,7 @@ dependencies = [ "dmi 0.1.0 (git+https://github.com/cholcombe973/dmi)", "fstab 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "gluster 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "goji 0.2.4 (git+https://github.com/cholcombe973/goji.git?rev=96aef5ea21d8b5ec3d83209de708fa2e13cd5c96)", + "goji 0.2.4 (git+https://github.com/cholcombe973/goji.git?rev=e0006da520c1afb443e1fbc2c201ccaed50345d9)", "gpt 1.0.0 (git+https://github.com/Quyzi/gpt)", "hashicorp_vault 1.0.0 (git+https://github.com/cholcombe973/vault-rs.git)", "hostname 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -845,7 +845,7 @@ dependencies = [ [[package]] name = "goji" version = "0.2.4" -source = "git+https://github.com/cholcombe973/goji.git?rev=96aef5ea21d8b5ec3d83209de708fa2e13cd5c96#96aef5ea21d8b5ec3d83209de708fa2e13cd5c96" +source = "git+https://github.com/cholcombe973/goji.git?rev=e0006da520c1afb443e1fbc2c201ccaed50345d9#e0006da520c1afb443e1fbc2c201ccaed50345d9" dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3174,7 +3174,7 @@ dependencies = [ "checksum getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum gluster 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"74fb74c4d5972b3aa616a9ceacc93f43d8ff9c189fd8530243d3feb32a672d62" -"checksum goji 0.2.4 (git+https://github.com/cholcombe973/goji.git?rev=96aef5ea21d8b5ec3d83209de708fa2e13cd5c96)" = "" +"checksum goji 0.2.4 (git+https://github.com/cholcombe973/goji.git?rev=e0006da520c1afb443e1fbc2c201ccaed50345d9)" = "" "checksum gpt 1.0.0 (git+https://github.com/Quyzi/gpt)" = "" "checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" "checksum hashicorp_vault 1.0.0 (git+https://github.com/cholcombe973/vault-rs.git)" = "" diff --git a/Cargo.toml b/Cargo.toml index 17b78db..7d37d18 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,7 @@ dirs = "~2.0" dmi = {git = "https://github.com/cholcombe973/dmi"} fstab = "~0.3" gluster = "~1.0" -goji = { git = "https://github.com/cholcombe973/goji.git", rev = "96aef5ea21d8b5ec3d83209de708fa2e13cd5c96" } +goji = { git = "https://github.com/cholcombe973/goji.git", rev = "e0006da520c1afb443e1fbc2c201ccaed50345d9"} gpt = { git = "https://github.com/Quyzi/gpt"} hashicorp_vault = { git = "https://github.com/cholcombe973/vault-rs.git" } hostname = "~0.1" diff --git a/src/create_support_ticket.rs b/src/create_support_ticket.rs index 1fb2000..513ab0b 100644 --- a/src/create_support_ticket.rs +++ b/src/create_support_ticket.rs @@ -13,15 +13,23 @@ pub fn create_support_ticket( ) -> BynarResult { let issue_description = CreateIssue { fields: Fields { - assignee: Assignee { name: settings.jira_user.clone() }, - components: vec![Component { name: "Ceph".into() }], + assignee: Assignee { + name: settings.jira_ticket_assignee.clone(), + }, + components: vec![Component { + name: "Ceph".into(), + }], description: description.into(), - issuetype: IssueType { id: settings.jira_issue_type.clone() }, - priority: Priority { id: settings.jira_priority.clone() }, - project: Project { key: settings.jira_project_id.clone() }, + issuetype: IssueType { + id: settings.jira_issue_type.clone(), + }, + priority: Priority { + id: settings.jira_priority.clone(), + }, + project: Project { + key: settings.jira_project_id.clone(), + }, summary: title.into(), - reporter: Assignee { name: settings.jira_ticket_assignee.clone() }, - environment: "".to_string(), }, }; let jira: Jira = match settings.proxy { From 7477053d6a95ac4a7053e6121c0190d7c68c4a60 Mon Sep 17 00:00:00 2001 From: mzhong820 Date: Fri, 12 Jun 2020 10:04:44 -0400 Subject: [PATCH 76/76] fix remove operation on successful Add so it removes all operations on the disk instead of the partition only --- src/main.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index c2de603..db62a35 100644 --- a/src/main.rs +++ b/src/main.rs @@ -586,6 +586,7 @@ fn add_repaired_disks( ); let tid = Some(ticket.ticket_id.to_string()); message_queue.push_back((op, tid, None)); + let _ = notify_slack(config, &format!("Creating Add disk operation request for {}", &ticket.device_path)); } Ok(false) => {} Err(e) => { @@ -621,9 +622,12 @@ fn handle_add_disk_res( pool: &Pool, outcome: &OpOutcomeResult, ticket_id: String, + config: &ConfigSettings, + disk_name: &str, ) { match outcome.get_outcome() { - OpOutcome::Success => debug!("Disk added successfully. Updating database record"), + OpOutcome::Success => {debug!("Disk added successfully. 
Updating database record"); + let _ = notify_slack(config, &format!("Disk {} added successfully", disk_name));}, // Disk was either boot or something that shouldn't be added via backend OpOutcome::Skipped => debug!("Disk Skipped. Updating database record"), // Disk is already in the cluster @@ -782,6 +786,7 @@ fn handle_operation_result( match op_res.get_op_type() { Op::Add => { error!("Add disk failed : {}", msg); + notify_slack(config, &format!("Add disk failed : {}", msg)); return Err(BynarError::from(msg)); } Op::Remove => { @@ -803,9 +808,15 @@ fn handle_operation_result( Op::Add => { if let Some(disk_op) = get_map_op(message_map, &dev_path.to_path_buf())? { if let Some(ticket_id) = disk_op.description { - handle_add_disk_res(pool, &op_res, ticket_id); + handle_add_disk_res(pool, &op_res, ticket_id, config, op_res.get_disk()); //update result in the map (in otherwords, just set it to None) - remove_map_op(message_map, &dev_path.to_path_buf())?; + let map = get_disk_map_op(message_map, &dev_path.to_path_buf())?; + info!("Disk map: {:X?}", map); + for (path, _) in map { + info!("Path: {}", path.display()); + let _ = remove_map_op(message_map, &path.to_path_buf())?; + } + info!("{:X?}", message_map); return Ok(()); } }
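
The disk-manager listen loop earlier in this series guards each device with a request map: op_running! rejects a request with OpOutcome::SkipRepeat while an entry for that disk is still pending, op_insert records the new operation before it is queued, and the entry is reset to None once the result has been sent back. Below is a minimal, self-contained sketch of that guard; the type and method names (PendingOps, is_running, mark_running, mark_done) are illustrative stand-ins, not the project's own.

use std::collections::HashMap;
use std::path::{Path, PathBuf};

// Illustrative guard map: Some(op) while a request for that device is in
// flight, None (or absent) once its result has been sent to the client.
struct PendingOps {
    inner: HashMap<PathBuf, Option<String>>,
}

impl PendingOps {
    fn new() -> Self {
        PendingOps { inner: HashMap::new() }
    }
    // true when the device already has an unfinished operation
    fn is_running(&self, disk: &Path) -> bool {
        matches!(self.inner.get(disk), Some(Some(_)))
    }
    // record a newly queued operation for the device
    fn mark_running(&mut self, disk: &Path, op: &str) {
        self.inner.insert(disk.to_path_buf(), Some(op.to_string()));
    }
    // called after the result is sent, mirroring req_map.insert(path, None)
    fn mark_done(&mut self, disk: &Path) {
        self.inner.insert(disk.to_path_buf(), None);
    }
}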
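
When a result is ready, the listener answers over the DEALER socket in two frames: the saved client id goes out first with zmq::SNDMORE and the serialized OpOutcomeResult follows via respond_to_client. A bare-bones version of that two-frame send, assuming the rust-zmq 0.9 API already used above and treating the payload as opaque bytes:

// Illustrative only: error handling and protobuf serialization are elided.
fn reply(sock: &zmq::Socket, client_id: &[u8], payload: &[u8]) -> Result<(), zmq::Error> {
    // client identity frame first, flagged as part of a multipart message...
    sock.send(client_id, zmq::SNDMORE)?;
    // ...then the serialized result as the final frame
    sock.send(payload, 0)?;
    Ok(())
}

The SNDMORE flag keeps the identity frame and the payload together as a single multipart message, so the receiver can read the id with one recv and check get_rcvmore() for the rest.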
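
PATCH 70 gives the poll_events! macro an optional third argument so callers such as add_disk in client.rs can sleep between retries when get_events() reports EBUSY. A plain-function equivalent of that arm is sketched below; the project keeps it as a macro so the call site can pass continue or another control-flow expression, which a function cannot do.

// Hypothetical helper, roughly equivalent to poll_events!(sock, continue, backoff_ms):
// retry get_events() while the socket is busy, sleeping between attempts.
fn wait_for_events(sock: &zmq::Socket, backoff_ms: u64) -> Result<zmq::PollEvents, zmq::Error> {
    loop {
        match sock.get_events() {
            Err(zmq::Error::EBUSY) => {
                std::thread::sleep(std::time::Duration::from_millis(backoff_ms));
            }
            other => return other,
        }
    }
}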
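
PATCH 71 changes run_smart_checks so a libatasmart failure is logged at debug level and the result of run_smartctl_check is propagated with ? instead of being returned early. The shape of that fallback chain is sketched here with hypothetical stand-ins, ata_smart_status and smartctl_status, in place of the real helpers.

use std::path::Path;

// Hypothetical stand-ins for the libatasmart status call and the smartctl fallback.
fn ata_smart_status(_dev: &Path) -> Result<bool, String> {
    Err("device does not speak ATA SMART".into())
}
fn smartctl_status(_dev: &Path) -> Result<bool, String> {
    Ok(true)
}

fn smart_passed(dev: &Path) -> Result<bool, String> {
    Ok(match ata_smart_status(dev) {
        Ok(status) => status,
        Err(e) => {
            // expected on RAID-backed or virtual devices, so keep the log quiet
            eprintln!("ata smart failed ({}), falling back to smartctl", e);
            smartctl_status(dev)?
        }
    })
}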
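
PATCH 76 makes a successful Add clear every pending entry under the parent disk, the device itself and each of its partitions, by walking the map returned by get_disk_map_op rather than removing only the path named in the request. A reduced sketch of that reset, with a plain String standing in for the per-device operation record:

use std::collections::HashMap;
use std::path::{Path, PathBuf};

// Outer map: parent disk -> (device or partition path -> pending operation).
type DiskMap = HashMap<PathBuf, Option<String>>;

fn clear_disk_ops(map: &mut HashMap<PathBuf, DiskMap>, parent: &Path) {
    if let Some(disk_map) = map.get_mut(parent) {
        // reset the parent and every partition, not just the requested path
        for pending in disk_map.values_mut() {
            *pending = None;
        }
    }
}

// Example use after a whole-disk Add completes (paths are illustrative):
// clear_disk_ops(&mut message_map_sketch, Path::new("/dev/sdb"));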