chore: remove some unnecessary calls to unwrap/expect (#6727)
DaniPopes committed Feb 22, 2024
1 parent c7ae4ef commit 9ca813a
Showing 21 changed files with 190 additions and 202 deletions.
2 changes: 1 addition & 1 deletion crates/ethereum-forks/src/forkid.rs
@@ -365,7 +365,7 @@ impl Cache {

// Create ForkId using the last past fork's hash and the next epoch start.
let fork_id = ForkId {
hash: past.last().expect("there is always at least one - genesis - fork hash; qed").1,
hash: past.last().expect("there is always at least one - genesis - fork hash").1,
next: epoch_end.unwrap_or(ForkFilterKey::Block(0)).into(),
};
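The only change here is dropping the `; qed` suffix, a convention inherited from Substrate that marks an `expect` message as a proved invariant, leaving just the invariant itself. A minimal sketch (hypothetical data, not the reth types) of how such a message surfaces:

fn main() {
    // Stand-in for the cache's list of past forks; genesis is always present.
    let past: Vec<(u64, u32)> = vec![(0, 0xfc64_ec04)];
    // The expect message becomes part of the panic output if the invariant
    // is ever violated, so it should state the invariant, not just "qed".
    let genesis = past.last().expect("there is always at least one - genesis - fork hash").1;
    println!("{genesis:#010x}");
}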

3 changes: 1 addition & 2 deletions crates/net/discv4/src/lib.rs
@@ -545,8 +545,7 @@ impl Discv4Service {
for (key, val) in config.additional_eip868_rlp_pairs.iter() {
builder.add_value_rlp(key, val.clone());
}

builder.build(&secret_key).expect("v4 is set; qed")
builder.build(&secret_key).expect("v4 is set")
};

let (to_service, commands_rx) = mpsc::unbounded_channel();
2 changes: 1 addition & 1 deletion crates/net/discv4/src/proto.rs
@@ -113,7 +113,7 @@ impl Message {
// Sign the payload with the secret key using recoverable ECDSA
let signature: RecoverableSignature = SECP256K1.sign_ecdsa_recoverable(
&secp256k1::Message::from_slice(keccak256(&payload).as_ref())
.expect("is correct MESSAGE_SIZE; qed"),
.expect("B256.len() == MESSAGE_SIZE"),
secret_key,
);
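The rewritten message states why the conversion cannot fail: `keccak256` returns a `B256`, 32 bytes, which is exactly secp256k1's `MESSAGE_SIZE`. A sketch using the secp256k1 crate's `Message::from_slice` (the call used here; newer releases deprecate it in favor of `from_digest_slice`):

use secp256k1::Message;

fn main() {
    // Stand-in for keccak256(&payload): any 32-byte digest fits MESSAGE_SIZE,
    // so the conversion only fails on a wrong slice length.
    let digest = [0x11u8; 32];
    let msg = Message::from_slice(&digest).expect("B256.len() == MESSAGE_SIZE");
    println!("{msg:?}");
}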

20 changes: 9 additions & 11 deletions crates/net/eth-wire/src/capability.rs
@@ -359,32 +359,32 @@ pub struct SharedCapabilities(Vec<SharedCapability>);

impl SharedCapabilities {
/// Merges the local and peer capabilities and returns a new [`SharedCapabilities`] instance.
#[inline]
pub fn try_new(
local_protocols: Vec<Protocol>,
peer_capabilities: Vec<Capability>,
) -> Result<Self, P2PStreamError> {
Ok(Self(shared_capability_offsets(local_protocols, peer_capabilities)?))
shared_capability_offsets(local_protocols, peer_capabilities).map(Self)
}

/// Iterates over the shared capabilities.
#[inline]
pub fn iter_caps(&self) -> impl Iterator<Item = &SharedCapability> {
self.0.iter()
}

/// Returns the eth capability if it is shared.
#[inline]
pub fn eth(&self) -> Result<&SharedCapability, P2PStreamError> {
for cap in self.iter_caps() {
if cap.is_eth() {
return Ok(cap)
}
}
Err(P2PStreamError::CapabilityNotShared)
self.iter_caps().find(|c| c.is_eth()).ok_or(P2PStreamError::CapabilityNotShared)
}

/// Returns the negotiated eth version if it is shared.
#[inline]
pub fn eth_version(&self) -> Result<EthVersion, P2PStreamError> {
self.eth().map(|cap| cap.eth_version().expect("is eth; qed"))
self.iter_caps()
.find_map(SharedCapability::eth_version)
.ok_or(P2PStreamError::CapabilityNotShared)
}

/// Returns true if the shared capabilities contain the given capability.
@@ -526,15 +526,13 @@ pub fn shared_capability_offsets(
// alphabetic order.
let mut offset = MAX_RESERVED_MESSAGE_ID + 1;
for name in shared_capability_names {
let proto_version = shared_capabilities.get(&name).expect("shared; qed");

let proto_version = &shared_capabilities[&name];
let shared_capability = SharedCapability::new(
&name,
proto_version.version as u8,
offset,
proto_version.messages,
)?;

offset += shared_capability.num_messages();
shared_with_offsets.push(shared_capability);
}
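Three idioms carry this hunk: `Result::map` over the tuple-struct constructor replaces `Ok(Self(..)?)`; `find`/`find_map` plus `ok_or` replace manual search loops; and indexing (`&shared_capabilities[&name]`, which panics with a standard message on a missing key) replaces `get(..).expect(..)` for keys collected from the same map. A self-contained sketch of the first two, with hypothetical types:

#[derive(Debug)]
struct NotShared;

// A tuple struct's constructor is a plain function, so `Self` can be passed
// to `map` directly.
#[derive(Debug)]
struct Shared(Vec<u8>);

fn offsets(input: &[u8]) -> Result<Vec<u8>, NotShared> {
    if input.is_empty() {
        Err(NotShared)
    } else {
        Ok(input.to_vec())
    }
}

impl Shared {
    // `map(Self)` lifts the Ok value into the newtype; no `?` plus `Ok(..)`.
    fn try_new(input: &[u8]) -> Result<Self, NotShared> {
        offsets(input).map(Self)
    }

    // `find` + `ok_or` replaces a for loop with an early return.
    fn first_even(&self) -> Result<&u8, NotShared> {
        self.0.iter().find(|&&b| b % 2 == 0).ok_or(NotShared)
    }
}

fn main() {
    let shared = Shared::try_new(&[1, 2, 3]).expect("non-empty input");
    println!("{:?}", shared.first_even());
}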
2 changes: 1 addition & 1 deletion crates/net/network/src/fetch/mod.rs
@@ -138,7 +138,7 @@ impl StateFetcher {

let Some(peer_id) = self.next_peer() else { return PollAction::NoPeersAvailable };

let request = self.queued_requests.pop_front().expect("not empty; qed");
let request = self.queued_requests.pop_front().expect("not empty");
let request = self.prepare_block_request(peer_id, request);

PollAction::Ready(FetchAction::BlockRequest { peer_id, request })
53 changes: 28 additions & 25 deletions crates/net/network/src/transactions/mod.rs
@@ -857,36 +857,39 @@ where
NetworkEvent::SessionEstablished {
peer_id, client_version, messages, version, ..
} => {
// insert a new peer into the peerset
self.peers.insert(peer_id, Peer::new(messages, version, client_version));

// Send a `NewPooledTransactionHashes` to the peer with up to
// `NEW_POOLED_TRANSACTION_HASHES_SOFT_LIMIT` transactions in the
// pool
if !self.network.is_initially_syncing() {
if self.network.tx_gossip_disabled() {
return
// Insert a new peer into the peerset.
let peer = Peer::new(messages, version, client_version);
let peer = match self.peers.entry(peer_id) {
Entry::Occupied(mut entry) => {
entry.insert(peer);
entry.into_mut()
}
let peer = self.peers.get_mut(&peer_id).expect("is present; qed");

let mut msg_builder = PooledTransactionsHashesBuilder::new(version);
Entry::Vacant(entry) => entry.insert(peer),
};

let pooled_txs = self.pool.pooled_transactions_max(
SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE,
);
if pooled_txs.is_empty() {
// do not send a message if there are no transactions in the pool
return
}
// Send a `NewPooledTransactionHashes` to the peer with up to
// `SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE`
// transactions in the pool.
if self.network.is_initially_syncing() || self.network.tx_gossip_disabled() {
return
}

for pooled_tx in pooled_txs.into_iter() {
peer.seen_transactions.insert(*pooled_tx.hash());
msg_builder.push_pooled(pooled_tx);
}
let pooled_txs = self.pool.pooled_transactions_max(
SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE,
);
if pooled_txs.is_empty() {
// do not send a message if there are no transactions in the pool
return
}

let msg = msg_builder.build();
self.network.send_transactions_hashes(peer_id, msg);
let mut msg_builder = PooledTransactionsHashesBuilder::new(version);
for pooled_tx in pooled_txs {
peer.seen_transactions.insert(*pooled_tx.hash());
msg_builder.push_pooled(pooled_tx);
}

let msg = msg_builder.build();
self.network.send_transactions_hashes(peer_id, msg);
}
_ => {}
}
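The old path inserted the peer and immediately looked it back up with `get_mut(..).expect("is present; qed")`. The `Entry` API performs the insert-or-replace and yields a mutable reference to the slot in one lookup. A minimal sketch with a hypothetical `Peer` type:

use std::collections::hash_map::{Entry, HashMap};

#[derive(Debug)]
struct Peer {
    version: u8,
}

// Insert or replace, then keep a `&mut` to the stored value, in one lookup.
fn upsert(peers: &mut HashMap<u64, Peer>, peer_id: u64, peer: Peer) -> &mut Peer {
    match peers.entry(peer_id) {
        Entry::Occupied(mut entry) => {
            entry.insert(peer); // replace the old value
            entry.into_mut() // keep a mutable borrow of the slot
        }
        Entry::Vacant(entry) => entry.insert(peer),
    }
}

fn main() {
    let mut peers = HashMap::new();
    let peer = upsert(&mut peers, 1, Peer { version: 68 });
    peer.version = 67;
    println!("{peers:?}");
}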
3 changes: 2 additions & 1 deletion crates/primitives/src/receipt.rs
@@ -500,6 +500,8 @@ impl<'a> ReceiptWithBloomEncoder<'a> {
}

match self.receipt.tx_type {
TxType::Legacy => unreachable!("legacy already handled"),

TxType::EIP2930 => {
out.put_u8(0x01);
}
@@ -513,7 +515,6 @@ impl<'a> ReceiptWithBloomEncoder<'a> {
TxType::DEPOSIT => {
out.put_u8(0x7E);
}
_ => unreachable!("legacy handled; qed."),
}
out.put_slice(payload.as_ref());
}
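Spelling out the `Legacy` arm (legacy receipts carry no type byte and are handled before this match) removes the trailing `_` wildcard, so the match stays exhaustive: a future `TxType` variant becomes a compile error rather than a silent path into `unreachable!`. A simplified sketch:

// Simplified stand-in for reth's TxType; the real enum has more variants.
enum TxType {
    Legacy,
    Eip2930,
    Eip1559,
}

fn type_byte(tx_type: TxType) -> u8 {
    match tx_type {
        // Encoded without a type byte; handled before this match is reached.
        TxType::Legacy => unreachable!("legacy already handled"),
        TxType::Eip2930 => 0x01,
        TxType::Eip1559 => 0x02,
    }
}

fn main() {
    println!("{:#04x}", type_byte(TxType::Eip2930));
    println!("{:#04x}", type_byte(TxType::Eip1559));
}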
14 changes: 6 additions & 8 deletions crates/primitives/src/transaction/signature.rs
@@ -42,18 +42,16 @@ impl Compact for Signature {
where
B: bytes::BufMut + AsMut<[u8]>,
{
buf.put_slice(self.r.as_le_bytes().as_ref());
buf.put_slice(self.s.as_le_bytes().as_ref());
buf.put_slice(&self.r.as_le_bytes());
buf.put_slice(&self.s.as_le_bytes());
self.odd_y_parity as usize
}

fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) {
let r = U256::try_from_le_slice(&buf[..32]).expect("qed");
buf.advance(32);

let s = U256::try_from_le_slice(&buf[..32]).expect("qed");
buf.advance(32);

assert!(buf.len() >= 64);
let r = U256::from_le_slice(&buf[0..32]);
let s = U256::from_le_slice(&buf[32..64]);
buf.advance(64);
(Signature { r, s, odd_y_parity: identifier != 0 }, buf)
}
}
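A single up-front `assert!(buf.len() >= 64)` makes both field reads infallible, collapsing two `try_from_le_slice(..).expect("qed")` calls into plain `from_le_slice` and one 64-byte advance. The same shape, with byte arrays standing in for `U256`:

// After the length assert, the fixed-size conversions cannot fail.
fn split_signature(buf: &[u8]) -> ([u8; 32], [u8; 32], &[u8]) {
    assert!(buf.len() >= 64);
    let r: [u8; 32] = buf[0..32].try_into().expect("32-byte slice");
    let s: [u8; 32] = buf[32..64].try_into().expect("32-byte slice");
    (r, s, &buf[64..])
}

fn main() {
    let buf = [0u8; 65];
    let (_r, _s, rest) = split_signature(&buf);
    assert_eq!(rest.len(), 1);
}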
9 changes: 5 additions & 4 deletions crates/prune/src/segments/receipts_by_logs.rs
@@ -158,11 +158,12 @@ impl<DB: Database> Segment<DB> for ReceiptsByLogs {
// For accurate checkpoints we need to know that we have checked every transaction.
// Example: we reached the end of the range, and the last receipt is supposed to skip
// its deletion.
last_pruned_transaction =
Some(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction));
let last_pruned_transaction = *last_pruned_transaction
.insert(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction));

last_pruned_block = Some(
provider
.transaction_block(last_pruned_transaction.expect("qed"))?
.transaction_block(last_pruned_transaction)?
.ok_or(PrunerError::InconsistentData("Block for transaction is not found"))?
// If there's more receipts to prune, set the checkpoint block number to
// previous, so we could finish pruning its receipts on the
@@ -175,7 +176,7 @@ impl<DB: Database> Segment<DB> for ReceiptsByLogs {
break
}

from_tx_number = last_pruned_transaction.expect("qed") + 1;
from_tx_number = last_pruned_transaction + 1;
}

// If there are contracts using `PruneMode::Distance(_)` there will be receipts before
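`Option::insert` stores the new value and returns a mutable reference to it, so the freshly computed maximum can be read back at once and the two later `expect("qed")` calls disappear. The idiom in isolation:

fn main() {
    let mut last_pruned: Option<u64> = Some(10);
    let last_skipped: u64 = 42;

    // `insert` returns `&mut u64`; deref-copy it to keep using the value.
    let current = *last_pruned.insert(last_pruned.unwrap_or_default().max(last_skipped));

    assert_eq!(current, 42);
    assert_eq!(last_pruned, Some(42));
}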
2 changes: 1 addition & 1 deletion crates/rpc/ipc/src/server/future.rs
@@ -195,7 +195,7 @@ impl ConnectionGuard {
match self.0.clone().try_acquire_owned() {
Ok(guard) => Some(guard),
Err(TryAcquireError::Closed) => {
unreachable!("Semaphore::Close is never called and can't be closed; qed")
unreachable!("Semaphore::Close is never called and can't be closed")
}
Err(TryAcquireError::NoPermits) => None,
}
139 changes: 69 additions & 70 deletions crates/rpc/rpc-builder/src/lib.rs
@@ -1272,76 +1272,77 @@ where
where
F: FnOnce(&EthHandlers<Provider, Pool, Network, Events, EvmConfig>) -> R,
{
if self.eth.is_none() {
let cache = EthStateCache::spawn_with(
self.provider.clone(),
self.config.eth.cache.clone(),
self.executor.clone(),
self.evm_config.clone(),
);
let gas_oracle = GasPriceOracle::new(
self.provider.clone(),
self.config.eth.gas_oracle.clone(),
cache.clone(),
);
let new_canonical_blocks = self.events.canonical_state_stream();
let c = cache.clone();

self.executor.spawn_critical(
"cache canonical blocks task",
Box::pin(async move {
cache_new_blocks_task(c, new_canonical_blocks).await;
}),
);
f(match &self.eth {
Some(eth) => eth,
None => self.eth.insert(self.init_eth()),
})
}

let fee_history_cache =
FeeHistoryCache::new(cache.clone(), self.config.eth.fee_history_cache.clone());
let new_canonical_blocks = self.events.canonical_state_stream();
let fhc = fee_history_cache.clone();
let provider_clone = self.provider.clone();
self.executor.spawn_critical(
"cache canonical blocks for fee history task",
Box::pin(async move {
fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider_clone)
.await;
}),
);
fn init_eth(&self) -> EthHandlers<Provider, Pool, Network, Events, EvmConfig> {
let cache = EthStateCache::spawn_with(
self.provider.clone(),
self.config.eth.cache.clone(),
self.executor.clone(),
self.evm_config.clone(),
);
let gas_oracle = GasPriceOracle::new(
self.provider.clone(),
self.config.eth.gas_oracle.clone(),
cache.clone(),
);
let new_canonical_blocks = self.events.canonical_state_stream();
let c = cache.clone();

self.executor.spawn_critical(
"cache canonical blocks task",
Box::pin(async move {
cache_new_blocks_task(c, new_canonical_blocks).await;
}),
);

let executor = Box::new(self.executor.clone());
let blocking_task_pool =
BlockingTaskPool::build().expect("failed to build tracing pool");
let api = EthApi::with_spawner(
self.provider.clone(),
self.pool.clone(),
self.network.clone(),
cache.clone(),
gas_oracle,
self.config.eth.rpc_gas_cap,
executor.clone(),
blocking_task_pool.clone(),
fee_history_cache,
self.evm_config.clone(),
);
let filter = EthFilter::new(
self.provider.clone(),
self.pool.clone(),
cache.clone(),
self.config.eth.filter_config(),
executor.clone(),
);
let fee_history_cache =
FeeHistoryCache::new(cache.clone(), self.config.eth.fee_history_cache.clone());
let new_canonical_blocks = self.events.canonical_state_stream();
let fhc = fee_history_cache.clone();
let provider_clone = self.provider.clone();
self.executor.spawn_critical(
"cache canonical blocks for fee history task",
Box::pin(async move {
fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider_clone).await;
}),
);

let pubsub = EthPubSub::with_spawner(
self.provider.clone(),
self.pool.clone(),
self.events.clone(),
self.network.clone(),
executor,
);
let executor = Box::new(self.executor.clone());
let blocking_task_pool = BlockingTaskPool::build().expect("failed to build tracing pool");
let api = EthApi::with_spawner(
self.provider.clone(),
self.pool.clone(),
self.network.clone(),
cache.clone(),
gas_oracle,
self.config.eth.rpc_gas_cap,
executor.clone(),
blocking_task_pool.clone(),
fee_history_cache,
self.evm_config.clone(),
);
let filter = EthFilter::new(
self.provider.clone(),
self.pool.clone(),
cache.clone(),
self.config.eth.filter_config(),
executor.clone(),
);

let eth = EthHandlers { api, cache, filter, pubsub, blocking_task_pool };
self.eth = Some(eth);
}
f(self.eth.as_ref().expect("exists; qed"))
let pubsub = EthPubSub::with_spawner(
self.provider.clone(),
self.pool.clone(),
self.events.clone(),
self.network.clone(),
executor,
);

EthHandlers { api, cache, filter, pubsub, blocking_task_pool }
}
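The refactor extracts construction into an `init_eth` helper and replaces the `if self.eth.is_none() { ... self.eth = Some(eth); }` block, plus its trailing `expect("exists; qed")`, with a match that either borrows the cached handlers or builds and stores them via `Option::insert`. A minimal sketch of the build-on-first-use pattern, with a hypothetical `Handlers` type:

struct Handlers {
    id: u32,
}

struct Builder {
    eth: Option<Handlers>,
}

impl Builder {
    // Stand-in for the extracted `init_eth`; the real one wires up caches,
    // oracles, and spawned tasks.
    fn init_eth(&self) -> Handlers {
        Handlers { id: 1 }
    }

    // `Option::insert` stores the value and returns a reference to it, so
    // no trailing `expect` is needed after initialization.
    fn with_eth<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&Handlers) -> R,
    {
        f(match &self.eth {
            Some(eth) => eth,
            None => self.eth.insert(self.init_eth()),
        })
    }
}

fn main() {
    let mut b = Builder { eth: None };
    assert_eq!(b.with_eth(|eth| eth.id), 1); // built on first use
    assert_eq!(b.with_eth(|eth| eth.id), 1); // cached thereafter
}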

/// Returns the configured [EthHandlers] or creates it if it does not exist yet
@@ -1643,9 +1644,7 @@ impl RpcServerConfig {
}
Some(ws_cors)
}
(None, cors @ Some(_)) => cors,
(cors @ Some(_), None) => cors,
_ => None,
(a, b) => a.or(b),
}
.cloned();
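The two one-sided arms and the `_ => None` fallback are exactly `Option::or`: keep the first value if it is `Some`, otherwise take the second. In isolation:

// Sketch of `Option::or` replacing the one-sided match arms.
fn pick_cors<'a>(http: Option<&'a str>, ws: Option<&'a str>) -> Option<&'a str> {
    http.or(ws)
}

fn main() {
    assert_eq!(pick_cors(Some("*"), None), Some("*"));
    assert_eq!(pick_cors(None, Some("http://localhost:3000")), Some("http://localhost:3000"));
    assert_eq!(pick_cors(None, None), None);
}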

@@ -1656,7 +1655,7 @@

modules.config.ensure_ws_http_identical()?;

let builder = self.http_server_config.take().expect("is set; qed");
let builder = self.http_server_config.take().expect("http_server_config is Some");
let (server, addr) = WsHttpServerKind::build(
builder,
http_socket_addr,
