From f63db92d8009901d705c1e395feb102fdbfef962 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 15 May 2020 13:58:52 +0200 Subject: [PATCH] Fix lots of small nits in sc-network (#6028) * Fix lots of small nits in sc-network * Update client/network/src/protocol/sync/blocks.rs Co-authored-by: Arkadiy Paronyan * Fix warning * Yes. The line width. Co-authored-by: Arkadiy Paronyan --- client/network/src/behaviour.rs | 4 +- client/network/src/block_requests.rs | 4 +- client/network/src/debug_info.rs | 2 +- client/network/src/discovery.rs | 2 +- client/network/src/light_client_handler.rs | 4 +- client/network/src/on_demand_layer.rs | 2 +- client/network/src/protocol.rs | 32 ++++---- .../src/protocol/generic_proto/behaviour.rs | 16 ++-- .../protocol/generic_proto/handler/group.rs | 18 ++--- .../generic_proto/upgrade/notifications.rs | 4 +- client/network/src/protocol/sync.rs | 74 ++++++++++--------- client/network/src/protocol/sync/blocks.rs | 49 ++++++------ .../src/protocol/sync/extra_requests.rs | 2 - client/network/src/service.rs | 10 +-- 14 files changed, 111 insertions(+), 112 deletions(-) diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 656743ef7f72d..2394d22319e25 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -113,7 +113,7 @@ impl Behaviour { ) -> Self { Behaviour { substrate, - debug_info: debug_info::DebugInfoBehaviour::new(user_agent, local_public_key.clone()), + debug_info: debug_info::DebugInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), block_requests, finality_proof_requests, @@ -369,7 +369,7 @@ impl NetworkBehaviourEventProcess Self { let identify = { let proto_version = "/substrate/1.0".to_string(); - Identify::new(proto_version, user_agent, local_public_key.clone()) + Identify::new(proto_version, user_agent, local_public_key) }; DebugInfoBehaviour { diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index f5252e2695ae8..00adc56ec591d 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -675,7 +675,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { continue; } - self.discoveries.extend(list.into_iter().map(|(peer_id, _)| peer_id)); + self.discoveries.extend(list.map(|(peer_id, _)| peer_id)); if let Some(peer_id) = self.discoveries.pop_front() { let ev = DiscoveryOut::Discovered(peer_id); return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 5080d16ead68a..236ae817474ab 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -980,7 +980,9 @@ where let handler = request.connection.map_or(NotifyHandler::Any, NotifyHandler::One); let request_id = self.next_request_id(); - self.peers.get_mut(&peer).map(|p| p.status = PeerStatus::BusyWith(request_id)); + if let Some(p) = self.peers.get_mut(&peer) { + p.status = PeerStatus::BusyWith(request_id); + } self.outstanding.insert(request_id, request); let event = OutboundProtocol { diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index c4d52db351a34..8cf7717407604 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -220,7 +220,7 @@ impl Future for RemoteResponse { fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { match self.receiver.poll_unpin(cx) { Poll::Ready(Ok(res)) => Poll::Ready(res), - 
Poll::Ready(Err(_)) => Poll::Ready(Err(From::from(ClientError::RemoteFetchCancelled))), + Poll::Ready(Err(_)) => Poll::Ready(Err(ClientError::RemoteFetchCancelled)), Poll::Pending => Poll::Pending, } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index e9f7b3eeed698..7662a476e9a6f 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -341,7 +341,7 @@ impl BlockAnnouncesHandshake { let info = chain.info(); BlockAnnouncesHandshake { genesis_hash: info.genesis_hash, - roles: protocol_config.roles.into(), + roles: protocol_config.roles, best_number: info.best_number, best_hash: info.best_hash, } @@ -543,7 +543,7 @@ impl Protocol { response: &message::BlockResponse ) -> Option> { if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - if let Some(_) = peer.obsolete_requests.remove(&response.id) { + if peer.obsolete_requests.remove(&response.id).is_some() { trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", who, response.id); return None; } @@ -583,7 +583,7 @@ impl Protocol { Ok(message) => message, Err(err) => { debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); + self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); return CustomMessageOutcome::None; } }; @@ -633,7 +633,7 @@ impl Protocol { GenericMessage::Consensus(msg) => return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { CustomMessageOutcome::NotificationsReceived { - remote: who.clone(), + remote: who, messages: vec![(msg.engine_id, From::from(msg.data))], } } else { @@ -655,7 +655,7 @@ impl Protocol { return if !messages.is_empty() { CustomMessageOutcome::NotificationsReceived { - remote: who.clone(), + remote: who, messages, } } else { @@ -713,7 +713,7 @@ impl Protocol { self.context_data.peers.remove(&peer) }; if let Some(_peer_data) = removed { - self.sync.peer_disconnected(peer.clone()); + self.sync.peer_disconnected(&peer); // Notify all the notification protocols as closed. 
CustomMessageOutcome::NotificationStreamClosed { @@ -774,9 +774,9 @@ impl Protocol { if blocks.len() >= max { break; } - let number = header.number().clone(); + let number = *header.number(); let hash = header.hash(); - let parent_hash = header.parent_hash().clone(); + let parent_hash = *header.parent_hash(); let justification = if get_justification { self.context_data.chain.justification(&BlockId::Hash(hash)).unwrap_or(None) } else { @@ -875,7 +875,7 @@ impl Protocol { return CustomMessageOutcome::None } - match self.sync.on_block_data(peer, Some(request), response) { + match self.sync.on_block_data(&peer, Some(request), response) { Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), Ok(sync::OnBlockData::Request(peer, req)) => { @@ -1329,7 +1329,7 @@ impl Protocol { version: CURRENT_VERSION, min_supported_version: MIN_VERSION, genesis_hash: info.genesis_hash, - roles: self.config.roles.into(), + roles: self.config.roles, best_number: info.best_number, best_hash: info.best_hash, chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible @@ -1355,7 +1355,7 @@ impl Protocol { message::BlockState::Normal => false, }; - match self.sync.on_block_announce(who.clone(), hash, &announce, is_their_best) { + match self.sync.on_block_announce(&who, hash, &announce, is_their_best) { sync::OnBlockAnnounce::Nothing => { // `on_block_announce` returns `OnBlockAnnounce::ImportHeader` // when we have all data required to import the block @@ -1375,7 +1375,7 @@ impl Protocol { // to import header from announced block let's construct response to request that normally would have // been sent over network (but it is not in our case) let blocks_to_import = self.sync.on_block_data( - who.clone(), + &who, None, message::generic::BlockResponse { id: 0, @@ -1911,7 +1911,7 @@ fn send_request( if let GenericMessage::BlockRequest(ref mut r) = message { if let Some(ref mut peer) = peers.get_mut(who) { r.id = peer.next_request_id; - peer.next_request_id = peer.next_request_id + 1; + peer.next_request_id += 1; if let Some((timestamp, request)) = peer.block_request.take() { trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); peer.obsolete_requests.insert(request.id, timestamp); @@ -2002,7 +2002,7 @@ impl NetworkBehaviour for Protocol { for (id, r) in self.sync.block_requests() { if self.use_new_block_requests_protocol { let event = CustomMessageOutcome::BlockRequest { - target: id, + target: id.clone(), request: r, }; self.pending_messages.push_back(event); @@ -2073,11 +2073,11 @@ impl NetworkBehaviour for Protocol { let outcome = match event { GenericProtoOut::CustomProtocolOpen { peer_id, .. } => { - self.on_peer_connected(peer_id.clone()); + self.on_peer_connected(peer_id); CustomMessageOutcome::None } GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { - self.on_peer_disconnected(peer_id.clone()) + self.on_peer_disconnected(peer_id) }, GenericProtoOut::LegacyMessage { peer_id, message } => self.on_custom_message(peer_id, message), diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 7bb5d986ca300..4984c0d86d9c7 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -507,7 +507,7 @@ impl GenericProto { /// /// Can be called multiple times with the same `PeerId`s. 
pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - self.peerset.discovered(peer_ids.into_iter().map(|peer_id| { + self.peerset.discovered(peer_ids.map(|peer_id| { debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); peer_id })); @@ -616,8 +616,8 @@ impl GenericProto { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ until {:?}", occ_entry.key(), until); *occ_entry.into_mut() = PeerState::PendingRequest { - timer: futures_timer::Delay::new(until.clone() - now), - timer_deadline: until.clone(), + timer: futures_timer::Delay::new(*until - now), + timer_deadline: *until, }; }, @@ -639,8 +639,8 @@ impl GenericProto { occ_entry.key(), banned); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { open, - timer: futures_timer::Delay::new(banned.clone() - now), - timer_deadline: banned.clone(), + timer: futures_timer::Delay::new(*banned - now), + timer_deadline: *banned, }; }, @@ -879,7 +879,7 @@ impl NetworkBehaviour for GenericProto { // this peer", and not "banned" in the sense that we would refuse the peer altogether. (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { - let incoming_id = self.next_incoming_index.clone(); + let incoming_id = self.next_incoming_index; self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { Some(v) => v, None => { @@ -1200,7 +1200,7 @@ impl NetworkBehaviour for GenericProto { debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); let event = GenericProtoOut::CustomProtocolClosed { reason, - peer_id: source.clone(), + peer_id: source, }; self.events.push(NetworkBehaviourAction::GenerateEvent(event)); } else { @@ -1384,7 +1384,7 @@ impl NetworkBehaviour for GenericProto { *peer_state = PeerState::Enabled { open }; } - st @ _ => *peer_state = st, + st => *peer_state = st, } } diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 0e453a368c222..625916a05e4a4 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -483,35 +483,35 @@ impl ProtocolsHandler for NotifsHandler { ) -> Poll< ProtocolsHandlerEvent > { - while let Poll::Ready(ev) = self.legacy.poll(cx) { - match ev { + if let Poll::Ready(ev) = self.legacy.poll(cx) { + return match ev { ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol: protocol.map_upgrade(EitherUpgrade::B), info: None, }), ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { endpoint, .. 
}) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( + Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::Open { endpoint } )), ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { endpoint, reason }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( + Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::Closed { endpoint, reason } )), ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( + Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::CustomMessage { message } )), ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::Clogged { messages }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( + Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::Clogged { messages } )), ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( + Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::ProtocolError { is_severe, error } )), ProtocolsHandlerEvent::Close(err) => - return Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))), + Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))), } } diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs index cf271016e728a..efcd0a4c8fb7d 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -390,8 +390,8 @@ pub enum NotificationsOutError { /// Remote doesn't process our messages quickly enough. /// /// > **Note**: This is not necessarily the remote's fault, and could also be caused by the - /// > local node sending data too quickly. Properly doing back-pressure, however, - /// > would require a deep refactoring effort in Substrate as a whole. + /// > local node sending data too quickly. Properly doing back-pressure, however, + /// > would require a deep refactoring effort in Substrate as a whole. Clogged, } diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index b720ba0aefe4b..f4c935de5d3af 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -574,7 +574,7 @@ impl ChainSync { if number > peer.best_number { peer.best_number = number; - peer.best_hash = hash.clone(); + peer.best_hash = *hash; } self.pending_requests.add(peer_id); } @@ -639,7 +639,7 @@ impl ChainSync { } /// Get an iterator over all block requests of all peers. - pub fn block_requests(&mut self) -> impl Iterator)> + '_ { + pub fn block_requests(&mut self) -> impl Iterator)> + '_ { if self.pending_requests.is_empty() { return Either::Left(std::iter::empty()) } @@ -682,7 +682,7 @@ impl ChainSync { req, ); have_requests = true; - Some((id.clone(), req)) + Some((id, req)) } else if let Some((hash, req)) = fork_sync_request( id, fork_targets, @@ -698,7 +698,7 @@ impl ChainSync { trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); peer.state = PeerSyncState::DownloadingStale(hash); have_requests = true; - Some((id.clone(), req)) + Some((id, req)) } else { None } @@ -713,24 +713,27 @@ impl ChainSync { /// /// If this corresponds to a valid block, this outputs the block that /// must be imported in the import queue. 
- pub fn on_block_data - (&mut self, who: PeerId, request: Option>, response: BlockResponse) -> Result, BadPeer> - { + pub fn on_block_data( + &mut self, + who: &PeerId, + request: Option>, + response: BlockResponse + ) -> Result, BadPeer> { let mut new_blocks: Vec> = - if let Some(peer) = self.peers.get_mut(&who) { + if let Some(peer) = self.peers.get_mut(who) { let mut blocks = response.blocks; if request.as_ref().map_or(false, |r| r.direction == message::Direction::Descending) { trace!(target: "sync", "Reversing incoming block list"); blocks.reverse() } - self.pending_requests.add(&who); + self.pending_requests.add(who); if request.is_some() { match &mut peer.state { PeerSyncState::DownloadingNew(start_block) => { - self.blocks.clear_peer_download(&who); + self.blocks.clear_peer_download(who); let start_block = *start_block; peer.state = PeerSyncState::Available; - validate_blocks::(&blocks, &who)?; + validate_blocks::(&blocks, who)?; self.blocks.insert(start_block, blocks, who.clone()); self.blocks .drain(self.best_queued_number + One::one()) @@ -751,9 +754,9 @@ impl ChainSync { peer.state = PeerSyncState::Available; if blocks.is_empty() { debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(who, rep::NO_BLOCK)); + return Err(BadPeer(who.clone(), rep::NO_BLOCK)); } - validate_blocks::(&blocks, &who)?; + validate_blocks::(&blocks, who)?; blocks.into_iter().map(|b| { IncomingBlock { hash: b.hash, @@ -774,11 +777,11 @@ impl ChainSync { }, (None, _) => { debug!(target: "sync", "Invalid response when searching for ancestor from {}", who); - return Err(BadPeer(who, rep::UNKNOWN_ANCESTOR)) + return Err(BadPeer(who.clone(), rep::UNKNOWN_ANCESTOR)) }, (_, Err(e)) => { info!("❌ Error answering legitimate blockchain query: {:?}", e); - return Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) + return Err(BadPeer(who.clone(), rep::BLOCKCHAIN_READ_ERROR)) } }; if matching_hash.is_some() { @@ -794,7 +797,7 @@ impl ChainSync { } if matching_hash.is_none() && current.is_zero() { trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)) + return Err(BadPeer(who.clone(), rep::GENESIS_MISMATCH)) } if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) { peer.state = PeerSyncState::AncestorSearch { @@ -802,7 +805,7 @@ impl ChainSync { start: *start, state: next_state, }; - return Ok(OnBlockData::Request(who, ancestry_request::(next_num))) + return Ok(OnBlockData::Request(who.clone(), ancestry_request::(next_num))) } else { // Ancestry search is complete. Check if peer is on a stale fork unknown to us and // add it to sync targets if necessary. @@ -838,7 +841,7 @@ impl ChainSync { } } else { // When request.is_none() this is a block announcement. Just accept blocks. - validate_blocks::(&blocks, &who)?; + validate_blocks::(&blocks, who)?; blocks.into_iter().map(|b| { IncomingBlock { hash: b.hash, @@ -869,7 +872,7 @@ impl ChainSync { // So the only way this can happen is when peers lie about the // common block. debug!(target: "sync", "Ignoring known blocks from {}", who); - return Err(BadPeer(who, rep::KNOWN_BLOCK)); + return Err(BadPeer(who.clone(), rep::KNOWN_BLOCK)); } let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); @@ -1124,7 +1127,7 @@ impl ChainSync { /// Updates our internal state for best queued block and then goes /// through all peers to update our view of their state as well. 
fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { - if let Some(_) = self.fork_targets.remove(&hash) { + if self.fork_targets.remove(&hash).is_some() { trace!(target: "sync", "Completed fork sync {:?}", hash); } if number > self.best_queued_number { @@ -1162,7 +1165,7 @@ impl ChainSync { /// header (call `on_block_data`). The network request isn't sent /// in this case. Both hash and header is passed as an optimization /// to avoid rehashing the header. - pub fn on_block_announce(&mut self, who: PeerId, hash: B::Hash, announce: &BlockAnnounce, is_best: bool) + pub fn on_block_announce(&mut self, who: &PeerId, hash: B::Hash, announce: &BlockAnnounce, is_best: bool) -> OnBlockAnnounce { let header = &announce.header; @@ -1177,7 +1180,7 @@ impl ChainSync { let ancient_parent = parent_status == BlockStatus::InChainPruned; let known = self.is_known(&hash); - let peer = if let Some(peer) = self.peers.get_mut(&who) { + let peer = if let Some(peer) = self.peers.get_mut(who) { peer } else { error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); @@ -1206,13 +1209,13 @@ impl ChainSync { peer.common_number = number - One::one(); } } - self.pending_requests.add(&who); + self.pending_requests.add(who); // known block case if known || self.is_already_downloading(&hash) { trace!(target: "sync", "Known block announce from {}: {}", who, hash); if let Some(target) = self.fork_targets.get_mut(&hash) { - target.peers.insert(who); + target.peers.insert(who.clone()); } return OnBlockAnnounce::Nothing } @@ -1251,21 +1254,21 @@ impl ChainSync { .entry(hash.clone()) .or_insert_with(|| ForkTarget { number, - parent_hash: Some(header.parent_hash().clone()), + parent_hash: Some(*header.parent_hash()), peers: Default::default(), }) - .peers.insert(who); + .peers.insert(who.clone()); } OnBlockAnnounce::Nothing } /// Call when a peer has disconnected. 
- pub fn peer_disconnected(&mut self, who: PeerId) { - self.blocks.clear_peer_download(&who); - self.peers.remove(&who); - self.extra_justifications.peer_disconnected(&who); - self.extra_finality_proofs.peer_disconnected(&who); + pub fn peer_disconnected(&mut self, who: &PeerId) { + self.blocks.clear_peer_download(who); + self.peers.remove(who); + self.extra_justifications.peer_disconnected(who); + self.extra_finality_proofs.peer_disconnected(who); self.pending_requests.set_all(); } @@ -1471,11 +1474,12 @@ fn fork_sync_request( } if r.number <= best_num { let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); - let mut count = (r.number - finalized).saturated_into::(); // up to the last finalized block - if parent_status != BlockStatus::Unknown { + let count = if parent_status == BlockStatus::Unknown { + (r.number - finalized).saturated_into::() // up to the last finalized block + } else { // request only single block - count = 1; - } + 1 + }; trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); return Some((hash.clone(), message::generic::BlockRequest { id: 0, diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index e59f22509c6ce..c79c0d7f51ecd 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -18,7 +18,6 @@ use std::cmp; use std::ops::Range; use std::collections::{HashMap, BTreeMap}; -use std::collections::hash_map::Entry; use log::trace; use libp2p::PeerId; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; @@ -117,17 +116,17 @@ impl BlockCollection { let mut prev: Option<(&NumberFor, &BlockRangeState)> = None; loop { let next = downloading_iter.next(); - break match &(prev, next) { - &(Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) + break match (prev, next) { + (Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) if downloading < max_parallel => (*start .. *start + *len, downloading), - &(Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => + (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => (*start + r.len() .. cmp::min(*next_start, *start + r.len() + count), 0), // gap - &(Some((start, r)), None) => + (Some((start, r)), None) => (*start + r.len() .. *start + r.len() + count, 0), // last range - &(None, None) => + (None, None) => (first_different .. first_different + count, 0), // empty - &(None, Some((start, _))) if *start > first_different => + (None, Some((start, _))) if *start > first_different => (first_different .. cmp::min(first_different + count, *start), 0), // gap at the start _ => { prev = next; @@ -168,7 +167,7 @@ impl BlockCollection { let mut prev = from; for (start, range_data) in &mut self.blocks { match range_data { - &mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => { + BlockRangeState::Complete(blocks) if *start <= prev => { prev = *start + (blocks.len() as u32).into(); // Remove all elements from `blocks` and add them to `drained` drained.append(blocks); @@ -186,26 +185,22 @@ impl BlockCollection { } pub fn clear_peer_download(&mut self, who: &PeerId) { - match self.peer_requests.entry(who.clone()) { - Entry::Occupied(entry) => { - let start = entry.remove(); - let remove = match self.blocks.get_mut(&start) { - Some(&mut BlockRangeState::Downloading { ref mut downloading, .. 
}) if *downloading > 1 => { - *downloading = *downloading - 1; - false - }, - Some(&mut BlockRangeState::Downloading { .. }) => { - true - }, - _ => { - false - } - }; - if remove { - self.blocks.remove(&start); + if let Some(start) = self.peer_requests.remove(who) { + let remove = match self.blocks.get_mut(&start) { + Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) if *downloading > 1 => { + *downloading -= 1; + false + }, + Some(&mut BlockRangeState::Downloading { .. }) => { + true + }, + _ => { + false } - }, - _ => (), + }; + if remove { + self.blocks.remove(&start); + } } } } diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index 09fcdcf49159a..1e63749c25a9f 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -103,11 +103,9 @@ impl ExtraRequests { // we have finalized further than the given request, presumably // by some other part of the system (not sync). we can safely // ignore the `Revert` error. - return; }, Err(err) => { debug!(target: "sync", "Failed to insert request {:?} into tree: {:?}", request, err); - return; } _ => () } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index f80801b1cc912..ba6c7a39b4b21 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -195,7 +195,7 @@ impl NetworkWorker { let checker = params.on_demand.as_ref() .map(|od| od.checker().clone()) - .unwrap_or(Arc::new(AlwaysBadChecker)); + .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); @@ -320,7 +320,7 @@ impl NetworkWorker { is_major_syncing: is_major_syncing.clone(), peerset: peerset_handle, local_peer_id, - to_worker: to_worker.clone(), + to_worker, _marker: PhantomData, }); @@ -447,7 +447,7 @@ impl NetworkWorker { Some((peer_id.to_base58(), NetworkStatePeer { endpoint, version_string: swarm.node(peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())).clone(), + .and_then(|i| i.client_version().map(|s| s.to_owned())), latest_ping_time: swarm.node(peer_id).and_then(|i| i.latest_ping()), enabled: swarm.user_protocol().is_enabled(&peer_id), open: swarm.user_protocol().is_open(&peer_id), @@ -463,7 +463,7 @@ impl NetworkWorker { list.into_iter().map(move |peer_id| { (peer_id.to_base58(), NetworkStateNotConnectedPeer { version_string: swarm.node(&peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())).clone(), + .and_then(|i| i.client_version().map(|s| s.to_owned())), latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) .into_iter().collect(), @@ -608,7 +608,7 @@ impl NetworkService { pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::RequestJustification(hash.clone(), number)); + .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); } /// Are we in the process of downloading the chain?
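
The hunks above are largely mechanical Rust clean-ups: replacing `Option::map` used only for a side effect with `if let`, replacing `if let Some(_) = …` with `.is_some()`, using compound assignment, dropping `.clone()`/`.into_iter()` where values are `Copy` or already iterators, and passing `&PeerId` by reference. The snippet below is a minimal, self-contained sketch of three of those recurring patterns; the `Peer` type and function names are hypothetical and are not part of the Substrate codebase.

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the peer bookkeeping touched by this patch.
#[derive(Default)]
struct Peer {
    next_request_id: u64,
    busy: bool,
}

/// Mark a peer as busy.
/// Before: `peers.get_mut(who).map(|p| p.busy = true);`
/// After: an `if let` makes the side effect explicit instead of discarding an `Option<()>`.
fn mark_busy(peers: &mut HashMap<String, Peer>, who: &str) {
    if let Some(p) = peers.get_mut(who) {
        p.busy = true;
    }
}

/// Drop a peer, reporting whether it was known.
/// Before: `if let Some(_) = peers.remove(who) { … }`
/// After: `.is_some()` reads more directly when the removed value itself is unused.
fn drop_peer(peers: &mut HashMap<String, Peer>, who: &str) -> bool {
    peers.remove(who).is_some()
}

/// Allocate the next request id.
/// Before: `peer.next_request_id = peer.next_request_id + 1;`
/// After: compound assignment.
fn next_request_id(peer: &mut Peer) -> u64 {
    let id = peer.next_request_id;
    peer.next_request_id += 1;
    id
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert("alice".to_owned(), Peer::default());

    mark_busy(&mut peers, "alice");
    assert!(peers["alice"].busy);

    let mut p = Peer::default();
    assert_eq!(next_request_id(&mut p), 0);
    assert_eq!(next_request_id(&mut p), 1);

    assert!(drop_peer(&mut peers, "alice"));
    assert!(!drop_peer(&mut peers, "alice"));
}
```

The sketch deliberately leaves out the borrow-related changes (e.g. `on_block_data(&mut self, who: &PeerId, …)` taking the peer id by reference and cloning only where a `BadPeer` is actually constructed); those follow the same spirit of avoiding needless ownership transfers and copies.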