From 9535f9c221a9b7c307820f30197862bd5c57d5e1 Mon Sep 17 00:00:00 2001
From: Tim Gretler
Date: Tue, 12 Dec 2023 19:34:35 +0000
Subject: [PATCH] chore(consensus-manager): Fix typos and update stale comments

---
 rs/p2p/consensus_manager/src/metrics.rs  |  4 ++--
 rs/p2p/consensus_manager/src/receiver.rs | 16 ++++++++--------
 rs/p2p/consensus_manager/src/sender.rs   |  8 +-------
 rs/p2p/consensus_manager/tests/test.rs   | 14 +++++++-------
 4 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/rs/p2p/consensus_manager/src/metrics.rs b/rs/p2p/consensus_manager/src/metrics.rs
index ef6ad8e5841..7b032decb85 100644
--- a/rs/p2p/consensus_manager/src/metrics.rs
+++ b/rs/p2p/consensus_manager/src/metrics.rs
@@ -117,7 +117,7 @@ impl ConsensusManagerMetrics {
             download_task_artifact_download_errors_total: metrics_registry.register(
                 IntCounter::with_opts(opts!(
                     "ic_consensus_manager_download_task_artifact_download_errors_total",
-                    "Error occured when downloading artifact.",
+                    "Error occurred when downloading artifact.",
                     const_labels.clone(),
                 ))
                 .unwrap(),
@@ -252,7 +252,7 @@ impl ConsensusManagerMetrics {
             slot_manager_maximum_slots_total: metrics_registry.register(
                 IntCounter::with_opts(opts!(
                     "ic_consensus_manager_slot_manager_maximum_slots_total",
-                    "Maxumum of slots simultaneously used.",
+                    "Maximum number of slots simultaneously used.",
                     const_labels.clone(),
                 ))
                 .unwrap(),
diff --git a/rs/p2p/consensus_manager/src/receiver.rs b/rs/p2p/consensus_manager/src/receiver.rs
index 5c1069d15db..ea454b0237c 100644
--- a/rs/p2p/consensus_manager/src/receiver.rs
+++ b/rs/p2p/consensus_manager/src/receiver.rs
@@ -224,8 +224,7 @@ where
     }
 
     /// Event loop that processes advert updates and artifact downloads.
-    /// The event loop preserves the following invariant:
-    /// - The number of download tasks is equal to the total number of distinct slot entries.
+    /// The event loop preserves the invariants checked with `debug_assert`.
     async fn start_event_loop(mut self) {
         let mut priority_fn_interval = time::interval(PRIORITY_FUNCTION_UPDATE_INTERVAL);
         priority_fn_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
@@ -433,7 +432,7 @@ where
         }
     }
 
-    /// Waits until advert resolves to fetch. If all peers are removed or priority becomes drop a `DownloadResult` is returned.
+    /// Waits until the advert resolves to fetch. If all peers are removed or the priority becomes drop, `DownloadStopped` is returned.
    async fn wait_fetch(
        id: &Artifact::Id,
        attr: &Artifact::Attribute,
@@ -482,8 +481,9 @@ where
     /// The download will be scheduled based on the given priority function, `priority_fn_watcher`.
     ///
     /// The download fails iff:
-    /// - The priority function evaluates the advert to [`Priority::Drop`] -> [`DownloadResult::PriorityIsDrop`]
-    /// - The set of peers advertising the artifact, `peer_rx`, becomes empty -> [`DownloadResult::AllPeersDeletedTheArtifact`]
+    /// - The priority function evaluates the advert to [`Priority::Drop`] -> [`DownloadStopped::PriorityIsDrop`]
+    /// - The set of peers advertising the artifact, `peer_rx`, becomes empty -> [`DownloadStopped::AllPeersDeletedTheArtifact`]
+    /// In both cases the failure condition is reported in the error variant of the returned result.
     async fn download_artifact(
         id: &Artifact::Id,
         attr: &Artifact::Attribute,
@@ -563,7 +563,7 @@ where
 
     /// Tries to download the given artifact, and insert it into the unvalidated pool.
     ///
-    /// This future completes waits for all peers that advertise the artifact to delete it.
+    /// This future waits for all peers that advertise the artifact to delete it.
     /// The artifact is deleted from the unvalidated pool upon completion.
     async fn process_advert(
         id: Artifact::Id,
@@ -740,7 +740,7 @@ mod tests {
         }
     }
 
-    /// Check that all variants of stale advers to not get added to the slot table.
+    /// Check that all variants of stale adverts do not get added to the slot table.
     #[test]
     fn receiving_stale_advert_updates() {
         // Abort process if a thread panics. This catches detached tokio tasks that panic.
@@ -896,7 +896,7 @@ mod tests {
         });
     }
 
-    /// Check that adverts updates with higher connnection ids take precedence.
+    /// Check that advert updates with higher connection ids take precedence.
     #[test]
     fn overwrite_slot() {
         // Abort process if a thread panics. This catches detached tokio tasks that panic.
diff --git a/rs/p2p/consensus_manager/src/sender.rs b/rs/p2p/consensus_manager/src/sender.rs
index 593cbe22bf7..7cfb452fbac 100644
--- a/rs/p2p/consensus_manager/src/sender.rs
+++ b/rs/p2p/consensus_manager/src/sender.rs
@@ -145,12 +145,6 @@ impl ConsensusManagerSender {
     }
 
     /// Sends an advert to all peers.
-    ///
-    /// Memory Consumption:
-    /// - JoinMap: #peers * (32 + ~32)
-    /// - HashMap: #peers * (32 + 8)
-    /// - advert: ±200
-    /// For 10k tasks ~50Mb
     async fn send_advert_to_all_peers(
         rt_handle: Handle,
         log: ReplicaLogger,
@@ -199,7 +193,7 @@ impl ConsensusManagerSender {
         let body = Bytes::from(pb::AdvertUpdate::proxy_encode(advert_update));
 
         let mut in_progress_transmissions = JoinSet::new();
-        // stores the connection ID of the last successful transmission to a peer.
+        // Stores the connection ID of the last successful transmission to a peer.
         let mut initiated_transmissions: HashMap = HashMap::new();
         let mut periodic_check_interval = time::interval(Duration::from_secs(5));
 
diff --git a/rs/p2p/consensus_manager/tests/test.rs b/rs/p2p/consensus_manager/tests/test.rs
index ecd9235ab6c..15f6c9ce473 100644
--- a/rs/p2p/consensus_manager/tests/test.rs
+++ b/rs/p2p/consensus_manager/tests/test.rs
@@ -254,7 +254,7 @@ fn test_flapping_connection_does_not_cause_duplicate_artifact_downloads() {
     });
 }
 
-fn start_consenus_manager(
+fn start_consensus_manager(
     log: ReplicaLogger,
     rt_handle: Handle,
     node_id: NodeId,
@@ -282,7 +282,7 @@ fn start_consenus_manager(
     artifact_processor_jh
 }
 
-async fn generate_consenus_events(
+async fn generate_consensus_events(
     processor: TestConsensus,
     test_duration: Duration,
     purge_fraction: f64,
@@ -330,7 +330,7 @@ fn check_pools_equal(node_pool_map: &HashMap>
             if node2 != node1 {
                 // If other pool subset everything is fine
                 if !pool1.my_pool().is_subset(&pool2.peer_pool(node1)) {
-                    // It can be case that muliple peers advertised same id and it only got downloaded from a different peer.
+                    // It can be the case that multiple peers advertised the same id and it only got downloaded from a different peer.
                     // In that case check that the id is contained in some other pool.
                     for diff in pool1.my_pool().difference(&pool2.peer_pool(node1)) {
                         let mut found = false;
@@ -351,7 +351,7 @@ fn check_pools_equal(node_pool_map: &HashMap>
 }
 
 /// Generates load test for the consensus manager by generating a event sequence of pool additions/removals and verifies
-/// that the pools contains all expected elements according to `check_pools_equal` after all eveents were emitted.
+/// that the pools contain all expected elements according to `check_pools_equal` after all events were emitted.
 /// num_peers: Number of consensus managers to spawn
 /// num_events: Number of add/remove events to generate.
 /// purge_fraction: Number of add events that have a corresponding purge event. Setting it to 1 means that every add event has remove event that follows later.
@@ -386,7 +386,7 @@ fn load_test(
     for i in 0..num_peers {
         let node = node_test_id(i);
         let processor = TestConsensus::new(log.clone(), node);
-        let jh = start_consenus_manager(
+        let jh = start_consensus_manager(
             no_op_logger(),
             rt.handle().clone(),
             node,
@@ -403,7 +403,7 @@ fn load_test(
     let mut load_set = JoinSet::new();
     // Generate some random load
     for (i, processor) in node_advert_map.values().enumerate() {
-        load_set.spawn(generate_consenus_events(
+        load_set.spawn(generate_consensus_events(
             processor.clone(),
             TEST_DURATION,
             purge_fraction,
@@ -432,7 +432,7 @@ fn load_test(
     });
 }
 /// NOTE: The values used for the tests below do not test anything specific and are set
-/// to cover a variaty of scenrios. The error signal of these tests is flakiness which indicate
+/// to cover a variety of scenarios. The error signal of these tests is flakiness which indicates
 /// that there might be some hidden race condition in the code.
 ///
 ///