Merge branch 'tim/fix-comments-typos' into 'master'
chore(consensus-manager): Fix typos and update stale comments

Fixes typos and stale comments pointed out by codegov. Also fixes other typos found by the spell checker. 

See merge request dfinity-lab/public/ic!16637
rumenov committed Dec 12, 2023
2 parents 7fd682d + 9535f9c commit 6bfb53b
Showing 4 changed files with 18 additions and 24 deletions.
4 changes: 2 additions & 2 deletions rs/p2p/consensus_manager/src/metrics.rs
@@ -117,7 +117,7 @@ impl ConsensusManagerMetrics {
download_task_artifact_download_errors_total: metrics_registry.register(
IntCounter::with_opts(opts!(
"ic_consensus_manager_download_task_artifact_download_errors_total",
"Error occured when downloading artifact.",
"Error occurred when downloading artifact.",
const_labels.clone(),
))
.unwrap(),
@@ -252,7 +252,7 @@ impl ConsensusManagerMetrics {
slot_manager_maximum_slots_total: metrics_registry.register(
IntCounter::with_opts(opts!(
"ic_consensus_manager_slot_manager_maximum_slots_total",
"Maxumum of slots simultaneously used.",
"Maximum of slots simultaneously used.",
const_labels.clone(),
))
.unwrap(),
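For context on the registration pattern shown in this hunk: each metric is an `IntCounter` built from an options struct and registered with the node's metrics registry. A minimal, self-contained sketch using the upstream `prometheus` crate directly (the IC's `MetricsRegistry` wrapper and the `const_labels` handling are omitted here, so this is an approximation rather than the actual code) might look like:

```rust
use prometheus::{IntCounter, Opts, Registry};

fn main() {
    let registry = Registry::new();

    // Counter with the corrected help text from the hunk above.
    let download_errors = IntCounter::with_opts(Opts::new(
        "ic_consensus_manager_download_task_artifact_download_errors_total",
        "Error occurred when downloading artifact.",
    ))
    .unwrap();
    registry.register(Box::new(download_errors.clone())).unwrap();

    // Incremented wherever an artifact download attempt fails.
    download_errors.inc();
    assert_eq!(download_errors.get(), 1);
}
```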
16 changes: 8 additions & 8 deletions rs/p2p/consensus_manager/src/receiver.rs
@@ -224,8 +224,7 @@ where
}

/// Event loop that processes advert updates and artifact downloads.
- /// The event loop preserves the following invariant:
- /// - The number of download tasks is equal to the total number of distinct slot entries.
+ /// The event loop preserves the invariants checked with `debug_assert`.
async fn start_event_loop(mut self) {
let mut priority_fn_interval = time::interval(PRIORITY_FUNCTION_UPDATE_INTERVAL);
priority_fn_interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
@@ -435,7 +434,7 @@ where
}
}

- /// Waits until advert resolves to fetch. If all peers are removed or priority becomes drop a `DownloadResult` is returned.
+ /// Waits until advert resolves to fetch. If all peers are removed or priority becomes drop `DownloadStopped` is returned.
async fn wait_fetch(
id: &Artifact::Id,
attr: &Artifact::Attribute,
@@ -484,8 +483,9 @@ where
/// The download will be scheduled based on the given priority function, `priority_fn_watcher`.
///
/// The download fails iff:
- /// - The priority function evaluates the advert to [`Priority::Drop`] -> [`DownloadResult::PriorityIsDrop`]
- /// - The set of peers advertising the artifact, `peer_rx`, becomes empty -> [`DownloadResult::AllPeersDeletedTheArtifact`]
+ /// - The priority function evaluates the advert to [`Priority::Drop`] -> [`DownloadStopped::PriorityIsDrop`]
+ /// - The set of peers advertising the artifact, `peer_rx`, becomes empty -> [`DownloadStopped::AllPeersDeletedTheArtifact`]
+ /// and the failure condition is reported in the error variant of the returned result.
async fn download_artifact(
log: ReplicaLogger,
id: &Artifact::Id,
@@ -574,7 +574,7 @@ where

/// Tries to download the given artifact, and insert it into the unvalidated pool.
///
- /// This future completes waits for all peers that advertise the artifact to delete it.
+ /// This future waits for all peers that advertise the artifact to delete it.
/// The artifact is deleted from the unvalidated pool upon completion.
async fn process_advert(
log: ReplicaLogger,
@@ -755,7 +755,7 @@ mod tests {
}
}

- /// Check that all variants of stale advers to not get added to the slot table.
+ /// Check that all variants of stale adverts to not get added to the slot table.
#[test]
fn receiving_stale_advert_updates() {
// Abort process if a thread panics. This catches detached tokio tasks that panic.
@@ -911,7 +911,7 @@ mod tests {
});
}

- /// Check that adverts updates with higher connnection ids take precedence.
+ /// Check that adverts updates with higher connection ids take precedence.
#[test]
fn overwrite_slot() {
// Abort process if a thread panics. This catches detached tokio tasks that panic.
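The corrected doc comments in this file reference `DownloadStopped::PriorityIsDrop` and `DownloadStopped::AllPeersDeletedTheArtifact` as the two ways a download can stop. A hypothetical sketch of how a caller might consume such a result (the enum below is illustrative only, not the actual type in `receiver.rs`):

```rust
// Illustrative stand-in for the DownloadStopped variants named in the doc comments
// above; the real definition lives in rs/p2p/consensus_manager/src/receiver.rs.
#[derive(Debug)]
enum DownloadStopped {
    /// The priority function evaluated the advert to `Priority::Drop`.
    PriorityIsDrop,
    /// Every peer that advertised the artifact has since deleted it.
    AllPeersDeletedTheArtifact,
}

fn report(result: Result<(), DownloadStopped>) {
    match result {
        Ok(()) => println!("artifact downloaded and inserted into the unvalidated pool"),
        Err(DownloadStopped::PriorityIsDrop) => println!("stopped: advert priority became Drop"),
        Err(DownloadStopped::AllPeersDeletedTheArtifact) => {
            println!("stopped: no peer advertises the artifact anymore")
        }
    }
}

fn main() {
    report(Err(DownloadStopped::PriorityIsDrop));
}
```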
8 changes: 1 addition & 7 deletions rs/p2p/consensus_manager/src/sender.rs
@@ -145,12 +145,6 @@ impl<Artifact: ArtifactKind> ConsensusManagerSender<Artifact> {
}

/// Sends an advert to all peers.
- ///
- /// Memory Consumption:
- /// - JoinMap: #peers * (32 + ~32)
- /// - HashMap: #peers * (32 + 8)
- /// - advert: ±200
- /// For 10k tasks ~50Mb
async fn send_advert_to_all_peers(
rt_handle: Handle,
log: ReplicaLogger,
@@ -199,7 +193,7 @@ impl<Artifact: ArtifactKind> ConsensusManagerSender<Artifact> {
let body = Bytes::from(pb::AdvertUpdate::proxy_encode(advert_update));

let mut in_progress_transmissions = JoinSet::new();
- // stores the connection ID of the last successful transmission to a peer.
+ // Stores the connection ID of the last successful transmission to a peer.
let mut initiated_transmissions: HashMap<NodeId, ConnId> = HashMap::new();
let mut periodic_check_interval = time::interval(Duration::from_secs(5));

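The comment corrected in the last hunk describes the sender's bookkeeping: a `HashMap<NodeId, ConnId>` remembers the connection on which an advert was last successfully sent to each peer. A simplified sketch of that idea, with plain integers standing in for the IC's `NodeId` and `ConnId` types (so not the actual sender logic):

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the IC's NodeId / ConnId types.
type NodeId = u64;
type ConnId = u64;

/// Returns true when the advert should be (re)sent to `peer`: either we have never
/// transmitted to it, or the peer reconnected and now has a newer connection id.
fn should_transmit(
    initiated_transmissions: &mut HashMap<NodeId, ConnId>,
    peer: NodeId,
    current_conn: ConnId,
) -> bool {
    match initiated_transmissions.get(&peer) {
        // Already transmitted on this connection; nothing to do.
        Some(&conn) if conn == current_conn => false,
        _ => {
            initiated_transmissions.insert(peer, current_conn);
            true
        }
    }
}

fn main() {
    let mut sent = HashMap::new();
    assert!(should_transmit(&mut sent, 1, 7)); // first transmission to peer 1
    assert!(!should_transmit(&mut sent, 1, 7)); // same connection, skip
    assert!(should_transmit(&mut sent, 1, 8)); // peer reconnected, send again
}
```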
14 changes: 7 additions & 7 deletions rs/p2p/consensus_manager/tests/test.rs
@@ -254,7 +254,7 @@ fn test_flapping_connection_does_not_cause_duplicate_artifact_downloads() {
});
}

- fn start_consenus_manager(
+ fn start_consensus_manager(
log: ReplicaLogger,
rt_handle: Handle,
node_id: NodeId,
@@ -282,7 +282,7 @@ fn start_consenus_manager(
artifact_processor_jh
}

- async fn generate_consenus_events(
+ async fn generate_consensus_events(
processor: TestConsensus<U64Artifact>,
test_duration: Duration,
purge_fraction: f64,
@@ -330,7 +330,7 @@ fn check_pools_equal(node_pool_map: &HashMap<NodeId, TestConsensus<U64Artifact>>
if node2 != node1 {
// If other pool subset everything is fine
if !pool1.my_pool().is_subset(&pool2.peer_pool(node1)) {
- // It can be case that muliple peers advertised same id and it only got downloaded from a different peer.
+ // It can be case that multiple peers advertised same id and it only got downloaded from a different peer.
// In that case check that the id is contained in some other pool.
for diff in pool1.my_pool().difference(&pool2.peer_pool(node1)) {
let mut found = false;
@@ -351,7 +351,7 @@ fn check_pools_equal(node_pool_map: &HashMap<NodeId, TestConsensus<U64Artifact>>
}

/// Generates load test for the consensus manager by generating a event sequence of pool additions/removals and verifies
- /// that the pools contains all expected elements according to `check_pools_equal` after all eveents were emitted.
+ /// that the pools contains all expected elements according to `check_pools_equal` after all events were emitted.
/// num_peers: Number of consensus managers to spawn
/// num_events: Number of add/remove events to generate.
/// purge_fraction: Number of add events that have a corresponding purge event. Setting it to 1 means that every add event has remove event that follows later.
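To make the `purge_fraction` parameter documented above concrete, here is one way such a fraction could be applied when generating the add/remove event sequence (names and logic are hypothetical, not the test's actual implementation):

```rust
// Hypothetical illustration of purge_fraction: with 1.0 every add event gets a
// matching remove event; with 0.25 roughly every fourth add event does.
fn add_event_has_remove(event_index: u64, purge_fraction: f64) -> bool {
    if purge_fraction <= 0.0 {
        return false;
    }
    let period = (1.0 / purge_fraction).ceil() as u64;
    event_index % period == 0
}

fn main() {
    let removes = (0..100).filter(|i| add_event_has_remove(*i, 0.25)).count();
    assert_eq!(removes, 25);
}
```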
@@ -386,7 +386,7 @@ fn load_test(
for i in 0..num_peers {
let node = node_test_id(i);
let processor = TestConsensus::new(log.clone(), node);
- let jh = start_consenus_manager(
+ let jh = start_consensus_manager(
no_op_logger(),
rt.handle().clone(),
node,
Expand All @@ -403,7 +403,7 @@ fn load_test(
let mut load_set = JoinSet::new();
// Generate some random load
for (i, processor) in node_advert_map.values().enumerate() {
- load_set.spawn(generate_consenus_events(
+ load_set.spawn(generate_consensus_events(
processor.clone(),
TEST_DURATION,
purge_fraction,
@@ -432,7 +432,7 @@ fn load_test(
});
}
/// NOTE: The values used for the tests below do not test anything specific and are set
- /// to cover a variaty of scenrios. The error signal of these tests is flakiness which indicate
+ /// to cover a variety of scenarios. The error signal of these tests is flakiness which indicate
/// that there might be some hidden race condition in the code.
///
///
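The comment fixed in `check_pools_equal` explains that an artifact missing from one peer's view may still be legitimate if it was downloaded from a different advertiser. A greatly simplified sketch of that consistency check, with flat `HashSet`s standing in for the `TestConsensus` pools (illustrative only, not the test's code):

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical stand-ins for the IC's NodeId and artifact id types.
type NodeId = u64;
type ArtifactId = u64;

/// Every artifact in `my_pool` that is missing from `view_of_node1` must still be
/// present in some pool, since it may have been downloaded from a different peer
/// that advertised the same id.
fn missing_ids_found_elsewhere(
    my_pool: &HashSet<ArtifactId>,
    view_of_node1: &HashSet<ArtifactId>,
    all_pools: &HashMap<NodeId, HashSet<ArtifactId>>,
) -> bool {
    my_pool
        .difference(view_of_node1)
        .all(|id| all_pools.values().any(|pool| pool.contains(id)))
}

fn main() {
    let my_pool = HashSet::from([1u64, 2, 3]);
    let view_of_node1 = HashSet::from([1u64, 2]);
    let all_pools = HashMap::from([(42u64, HashSet::from([3u64]))]);
    assert!(missing_ids_found_elsewhere(&my_pool, &view_of_node1, &all_pools));
}
```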
