style: RFC to enable clippy::perf lints (#10378)
During the holidays I noticed a discussion about enabling clippy for the
`rust-lang/rust` repository. Within that discussion people also debated
which lints or lint categories they would consider important to enable,
and to my surprise `clippy::perf` was one of those categories.

So I went ahead and checked what enabling it would entail for our nearcore
codebase as well. In broad strokes, a large number of lint triggers were
in test code. Those changes aren't particularly interesting, since test
code isn't perf-sensitive, but in certain cases the lints also resulted
in cleaner code (for instance, `expect(&format!(...))` is a code smell IMO).
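
To make the smell concrete, here is a minimal sketch (illustrative code, not
taken from this change): the message argument to `expect` is evaluated
eagerly, so the `format!` call runs and allocates on every invocation, even
when the value is `Ok`. With `unwrap_or_else` the message is only built on
the failure path.

    // Illustrative sketch of the pattern this change cleans up.
    fn eager(input: &str) -> u64 {
        // The panic message is formatted even when parsing succeeds.
        input.parse().expect(&format!("failed to parse {input:?}"))
    }

    fn lazy(input: &str) -> u64 {
        // The message is formatted only if parsing actually fails.
        input.parse().unwrap_or_else(|_| panic!("failed to parse {input:?}"))
    }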

For regular code the most notable class of changes concerned the size of
data structures. For instance, we had a couple of cases where a function
returned a `Result` pairing a small success value with an unreasonably
large error variant. When functions like these are called frequently,
moving the return value around can become pretty darn expensive.
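
A minimal sketch of the issue (made-up types, not the actual nearcore ones):
a `Result` is at least as big as its largest variant, so a bloated error type
taxes every call, including the successful ones. Boxing the error shrinks the
common path at the cost of one allocation on the rare error path.

    // Illustrative only; BigError stands in for a large error variant.
    struct BigError {
        details: [u8; 256],
    }

    // Every return moves a >256-byte Result, even when it is Ok.
    fn unboxed() -> Result<u64, BigError> {
        Ok(42)
    }

    // The boxed version moves only a pointer-sized error slot.
    fn boxed() -> Result<u64, Box<BigError>> {
        Ok(42)
    }

    fn main() {
        // Exact sizes are platform-dependent; the ratio is the point.
        println!("{}", std::mem::size_of::<Result<u64, BigError>>()); // ~264
        println!("{}", std::mem::size_of::<Result<u64, Box<BigError>>>()); // ~16
    }

This is why several of the enums in the diff below gain a `Box<...>` around
their largest payloads.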

In many cases the lints came with useful suggestions that I could apply
immediately, without having to think about a better way to write the same
code, so it doesn't seem like enabling them would be too harmful to
developer productivity...
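
(As an aside, many of these suggestions are mechanical enough that clippy can
apply them itself; assuming a reasonably recent toolchain, `cargo clippy
--workspace --fix` should handle the bulk of them, leaving only structural
changes like the boxing above to do by hand.)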
nagisa committed Jan 12, 2024
1 parent 38afdd5 commit 9fc06ac
Showing 50 changed files with 158 additions and 151 deletions.
10 changes: 7 additions & 3 deletions Cargo.toml
@@ -98,14 +98,18 @@ members = [
warnings = "deny"

[workspace.lints.clippy]
-all = { level = "allow", priority = -1 }
+all = { level = "allow", priority = -100 }
+correctness = { level = "deny", priority = -50 }
+suspicious = { level = "deny", priority = -50 }
+perf = { level = "deny", priority = -50 }
+# overrides clippy::perf = "deny": https://github.com/rust-lang/rust-clippy/issues/8111
+single_char_pattern = "allow"
clone_on_copy = "deny"
-correctness = "deny"
derivable_impls = "deny"
redundant_clone = "deny"
-suspicious = "deny"
len_zero = "deny"


[workspace.dependencies]
actix = "0.13.0"
actix-cors = "0.6.1"
16 changes: 9 additions & 7 deletions chain/chain/src/chain.rs
@@ -1177,9 +1177,9 @@ impl Chain {
let chunk_proof = ChunkProofs {
block_header: borsh::to_vec(&block.header()).expect("Failed to serialize"),
merkle_proof: merkle_paths[shard_id].clone(),
-chunk: MaybeEncodedShardChunk::Encoded(EncodedShardChunk::clone(
+chunk: Box::new(MaybeEncodedShardChunk::Encoded(EncodedShardChunk::clone(
&encoded_chunk,
-)),
+))),
};
return Err(Error::InvalidChunkProofs(Box::new(chunk_proof)));
}
@@ -1739,10 +1739,12 @@ impl Chain {
) -> Result<AcceptedBlock, Error> {
let timer = metrics::BLOCK_POSTPROCESSING_TIME.start_timer();
let (block, block_preprocess_info) =
-self.blocks_in_processing.remove(&block_hash).expect(&format!(
-    "block {:?} finished applying chunks but not in blocks_in_processing pool",
-    block_hash
-));
+self.blocks_in_processing.remove(&block_hash).unwrap_or_else(|| {
+    panic!(
+        "block {:?} finished applying chunks but not in blocks_in_processing pool",
+        block_hash
+    )
+});
// We want to include block height here, so we didn't put this line at the beginning of the
// function.
let _span = tracing::debug_span!(
@@ -2841,7 +2843,7 @@ impl Chain {
let chunk_proof = ChunkProofs {
block_header: borsh::to_vec(&block.header()).expect("Failed to serialize"),
merkle_proof: merkle_paths[chunk.shard_id() as usize].clone(),
-chunk: MaybeEncodedShardChunk::Decoded(chunk.clone()),
+chunk: MaybeEncodedShardChunk::Decoded(chunk.clone()).into(),
};
return Err(Error::InvalidChunkProofs(Box::new(chunk_proof)));
}
2 changes: 1 addition & 1 deletion chain/chain/src/tests/doomslug.rs
@@ -35,7 +35,7 @@ fn one_iter(
delta: Duration,
height_goal: BlockHeight,
) -> (Duration, BlockHeight) {
-let account_ids = vec!["test1", "test2", "test3", "test4", "test5", "test6", "test7", "test8"];
+let account_ids = ["test1", "test2", "test3", "test4", "test5", "test6", "test7", "test8"];
let stakes = account_ids
.iter()
.map(|account_id| ApprovalStake {
1 change: 1 addition & 0 deletions chain/chain/src/update_shard.rs
@@ -91,6 +91,7 @@ pub(crate) struct ReshardingData {

/// Reason to update a shard when new block appears on chain.
/// All types include state roots for children shards in case of resharding.
+#[allow(clippy::large_enum_variant)]
pub(crate) enum ShardUpdateReason {
/// Block has a new chunk for the shard.
/// Contains chunk itself and all new incoming receipts to the shard.
4 changes: 2 additions & 2 deletions chain/chain/src/validate.rs
@@ -255,7 +255,7 @@ fn validate_chunk_proofs_challenge(
) -> Result<(CryptoHash, Vec<AccountId>), Error> {
let block_header = BlockHeader::try_from_slice(&chunk_proofs.block_header)?;
validate_header_authorship(epoch_manager, &block_header)?;
-let chunk_header = match &chunk_proofs.chunk {
+let chunk_header = match &*chunk_proofs.chunk {
MaybeEncodedShardChunk::Encoded(encoded_chunk) => encoded_chunk.cloned_header(),
MaybeEncodedShardChunk::Decoded(chunk) => chunk.cloned_header(),
};
@@ -271,7 +271,7 @@
}
// Temporary holds the decoded chunk, since we use a reference below to avoid cloning it.
let tmp_chunk;
-let chunk_ref = match &chunk_proofs.chunk {
+let chunk_ref = match &*chunk_proofs.chunk {
MaybeEncodedShardChunk::Encoded(encoded_chunk) => {
match encoded_chunk.decode_chunk(epoch_manager.num_data_parts()) {
Ok(chunk) => {
15 changes: 9 additions & 6 deletions chain/client/src/client.rs
@@ -1489,12 +1489,15 @@ impl Client {
let avg_block_prod_time = (self.config.min_block_production_delay.as_nanos()
+ self.config.max_block_production_delay.as_nanos())
/ 2;
-let ns = (self.accrued_fastforward_delta as u128 * avg_block_prod_time).try_into().expect(
-    &format!(
-        "Too high of a delta_height {} to convert into u64",
-        self.accrued_fastforward_delta
-    ),
-);
+
+let ns = (self.accrued_fastforward_delta as u128 * avg_block_prod_time)
+    .try_into()
+    .unwrap_or_else(|_| {
+        panic!(
+            "Too high of a delta_height {} to convert into u64",
+            self.accrued_fastforward_delta
+        )
+    });

chrono::Duration::nanoseconds(ns)
}
2 changes: 1 addition & 1 deletion chain/client/src/client_actor.rs
@@ -690,7 +690,7 @@ impl Handler<WithSpanContext<Status>> for ClientActor {
network_info: new_network_info_view(&self.client.chain, &self.network_info),
sync_status: format!(
"{} ({})",
-self.client.sync_status.as_variant_name().to_string(),
+self.client.sync_status.as_variant_name(),
display_sync_status(
&self.client.sync_status,
&self.client.chain.head()?,
2 changes: 1 addition & 1 deletion chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs
@@ -107,7 +107,7 @@ fn test_send_tx_commit() {
FinalExecutionStatus::SuccessValue(Vec::new())
);
assert!(
-vec![TxExecutionStatus::Executed, TxExecutionStatus::Final]
+[TxExecutionStatus::Executed, TxExecutionStatus::Final]
.contains(&result.final_execution_status),
"All the receipts should be already executed"
);
2 changes: 1 addition & 1 deletion chain/network/src/peer_manager/tests/routing.rs
@@ -1300,7 +1300,7 @@ async fn archival_node() {
let pm0_connections: HashSet<PeerId> =
pm0.with_state(|s| async move { s.tier2.load().ready.keys().cloned().collect() }).await;

-let pms = vec![&pm2, &pm3, &pm4];
+let pms = [&pm2, &pm3, &pm4];
let chosen = pms
.iter()
.filter(|&pm| !pm0_connections.contains(&pm.cfg.node_id()))
2 changes: 1 addition & 1 deletion chain/network/src/test_loop.rs
@@ -11,6 +11,6 @@ impl<InnerData: AsRef<AccountId>> SupportsRoutingLookup for Vec<InnerData> {
fn index_for_account(&self, account: &AccountId) -> usize {
self.iter()
.position(|data| data.as_ref() == account)
-.expect(&format!("Account not found: {}", account))
+.unwrap_or_else(|| panic!("Account not found: {}", account))
}
}
1 change: 1 addition & 0 deletions chain/network/src/testonly/fake_client.rs
@@ -16,6 +16,7 @@ use near_primitives::types::{AccountId, EpochId, ShardId};
use near_primitives::views::FinalExecutionOutcomeView;

#[derive(Debug, PartialEq, Eq, Clone)]
+#[allow(clippy::large_enum_variant)]
pub enum Event {
AnnounceAccount(Vec<(AnnounceAccount, Option<EpochId>)>),
Block(Block),
3 changes: 1 addition & 2 deletions core/primitives/benches/serialization.rs
@@ -37,8 +37,7 @@ fn create_transaction() -> SignedTransaction {
}

fn create_block() -> Block {
-let genesis_chunks =
-    genesis_chunks(vec![StateRoot::new()], &vec![0], 1_000, 0, PROTOCOL_VERSION);
+let genesis_chunks = genesis_chunks(vec![StateRoot::new()], &[0], 1_000, 0, PROTOCOL_VERSION);
let genesis = Block::genesis(
PROTOCOL_VERSION,
genesis_chunks.into_iter().map(|chunk| chunk.take_header()).collect(),
3 changes: 2 additions & 1 deletion core/primitives/src/challenge.rs
@@ -51,13 +51,14 @@ pub struct ChunkProofs {
/// Merkle proof of inclusion of this chunk.
pub merkle_proof: MerklePath,
/// Invalid chunk in an encoded form or in a decoded form.
-pub chunk: MaybeEncodedShardChunk,
+pub chunk: Box<MaybeEncodedShardChunk>,
}

/// Either `EncodedShardChunk` or `ShardChunk`. Used for `ChunkProofs`.
/// `Decoded` is used to avoid re-encoding an already decoded chunk to construct a challenge.
/// `Encoded` is still needed in case a challenge challenges an invalid encoded chunk that can't be
/// decoded.
+#[allow(clippy::large_enum_variant)] // both variants are large
#[derive(BorshSerialize, BorshDeserialize, PartialEq, Eq, Clone, Debug)]
pub enum MaybeEncodedShardChunk {
Encoded(EncodedShardChunk),
2 changes: 1 addition & 1 deletion core/primitives/src/epoch_sync.rs
@@ -30,5 +30,5 @@ pub struct EpochSyncFinalizationResponse {
#[derive(BorshSerialize, BorshDeserialize, Eq, PartialEq, Debug, Clone)]
pub enum EpochSyncResponse {
UpToDate,
-Advance { light_client_block_view: LightClientBlockView },
+Advance { light_client_block_view: Box<LightClientBlockView> },
}
15 changes: 8 additions & 7 deletions core/primitives/src/errors.rs
@@ -61,7 +61,7 @@ pub enum RuntimeError {
/// It's possible the input state is invalid or malicious.
StorageError(StorageError),
/// An error happens if `check_balance` fails, which is likely an indication of an invalid state.
-BalanceMismatchError(BalanceMismatchError),
+BalanceMismatchError(Box<BalanceMismatchError>),
/// The incoming receipt didn't pass the validation, it's likely a malicious behaviour.
ReceiptValidationError(ReceiptValidationError),
/// Error when accessing validator information. Happens inside epoch manager.
@@ -193,7 +193,7 @@ impl std::error::Error for InvalidTxError {}
)]
pub enum InvalidAccessKeyError {
/// The access key identified by the `public_key` doesn't exist for the account
-AccessKeyNotFound { account_id: AccountId, public_key: PublicKey },
+AccessKeyNotFound { account_id: AccountId, public_key: Box<PublicKey> },
/// Transaction `receiver_id` doesn't match the access key receiver_id
ReceiverMismatch { tx_receiver: AccountId, ak_receiver: String },
/// Transaction method name isn't allowed by the access key
@@ -203,7 +203,7 @@ pub enum InvalidAccessKeyError {
/// Access Key does not have enough allowance to cover transaction cost
NotEnoughAllowance {
account_id: AccountId,
-public_key: PublicKey,
+public_key: Box<PublicKey>,
#[serde(with = "dec_format")]
allowance: Balance,
#[serde(with = "dec_format")]
@@ -247,7 +247,7 @@ pub enum ActionsValidationError {
/// The length of the arguments exceeded the limit in a Function Call action.
FunctionCallArgumentsLengthExceeded { length: u64, limit: u64 },
/// An attempt to stake with a public key that is not convertible to ristretto.
-UnsuitableStakingKey { public_key: PublicKey },
+UnsuitableStakingKey { public_key: Box<PublicKey> },
/// The attached amount of gas in a FunctionCall action has to be a positive number.
FunctionCallZeroAttachedGas,
/// There should be the only one DelegateAction
@@ -443,15 +443,16 @@ pub enum ActionErrorKind {
registrar_account_id: AccountId,
predecessor_id: AccountId,
},

/// A newly created account must be under a namespace of the creator account
CreateAccountNotAllowed { account_id: AccountId, predecessor_id: AccountId },
/// Administrative actions like `DeployContract`, `Stake`, `AddKey`, `DeleteKey`. can be proceed only if sender=receiver
/// or the first TX action is a `CreateAccount` action
ActorNoPermission { account_id: AccountId, actor_id: AccountId },
/// Account tries to remove an access key that doesn't exist
-DeleteKeyDoesNotExist { account_id: AccountId, public_key: PublicKey },
+DeleteKeyDoesNotExist { account_id: AccountId, public_key: Box<PublicKey> },
/// The public key is already used for an existing access key
-AddKeyAlreadyExists { account_id: AccountId, public_key: PublicKey },
+AddKeyAlreadyExists { account_id: AccountId, public_key: Box<PublicKey> },
/// Account is staking and can not be deleted
DeleteAccountStaking { account_id: AccountId },
/// ActionReceipt can't be completed, because the remaining balance will not be enough to cover storage.
@@ -744,7 +745,7 @@ impl From<StorageError> for RuntimeError {

impl From<BalanceMismatchError> for RuntimeError {
fn from(e: BalanceMismatchError) -> Self {
-RuntimeError::BalanceMismatchError(e)
+RuntimeError::BalanceMismatchError(Box::new(e))
}
}

2 changes: 1 addition & 1 deletion core/primitives/src/transaction.rs
@@ -239,7 +239,7 @@ pub enum ExecutionMetadata {
/// V2: With ProfileData by legacy `Cost` enum
V2(ProfileDataV2),
/// V3: With ProfileData by gas parameters
-V3(ProfileDataV3),
+V3(Box<ProfileDataV3>),
}

impl fmt::Debug for ExecutionOutcome {
2 changes: 1 addition & 1 deletion core/primitives/src/views.rs
@@ -2389,7 +2389,7 @@ mod tests {
#[test]
#[cfg_attr(feature = "nightly", ignore)]
fn test_exec_metadata_v3_view() {
-let metadata = ExecutionMetadata::V3(ProfileDataV3::test());
+let metadata = ExecutionMetadata::V3(ProfileDataV3::test().into());
let view = ExecutionMetadataView::from(metadata);
insta::assert_json_snapshot!(view);
}
19 changes: 8 additions & 11 deletions core/store/src/cold_storage.rs
@@ -9,7 +9,7 @@ use near_primitives::hash::CryptoHash;
use near_primitives::shard_layout::ShardLayout;
use near_primitives::sharding::ShardChunk;
use near_primitives::types::BlockHeight;
-use std::collections::HashMap;
+use std::collections::{hash_map, HashMap};
use std::io;
use strum::IntoEnumIterator;

@@ -513,12 +513,9 @@ impl StoreWithCache<'_> {
}

pub fn get(&mut self, column: DBCol, key: &[u8]) -> io::Result<StoreValue> {
-if !self.cache.contains_key(&(column, key.to_vec())) {
+if let hash_map::Entry::Vacant(e) = self.cache.entry((column, key.to_vec())) {
crate::metrics::COLD_MIGRATION_READS.with_label_values(&[<&str>::from(column)]).inc();
-    self.cache.insert(
-        (column, key.to_vec()),
-        self.store.get(column, key)?.map(|x| x.as_slice().to_vec()),
-    );
+    e.insert(self.store.get(column, key)?.map(|x| x.as_slice().to_vec()));
}
Ok(self.cache[&(column, key.to_vec())].clone())
}
@@ -637,7 +634,7 @@ mod test {
assert_eq!(
HashSet::<StoreKey>::from_iter(combine_keys(
&key_type_to_keys,
-&vec![DBKeyType::BlockHash, DBKeyType::BlockHeight]
+&[DBKeyType::BlockHash, DBKeyType::BlockHeight]
)),
HashSet::<StoreKey>::from_iter(vec![
vec![1, 2, 3, 0, 1],
@@ -650,7 +647,7 @@
assert_eq!(
HashSet::<StoreKey>::from_iter(combine_keys(
&key_type_to_keys,
-&vec![DBKeyType::BlockHeight, DBKeyType::BlockHash, DBKeyType::BlockHeight]
+&[DBKeyType::BlockHeight, DBKeyType::BlockHash, DBKeyType::BlockHeight]
)),
HashSet::<StoreKey>::from_iter(vec![
vec![0, 1, 1, 2, 3, 0, 1],
@@ -667,21 +664,21 @@
assert_eq!(
HashSet::<StoreKey>::from_iter(combine_keys(
&key_type_to_keys,
-&vec![DBKeyType::ShardId, DBKeyType::BlockHeight]
+&[DBKeyType::ShardId, DBKeyType::BlockHeight]
)),
HashSet::<StoreKey>::from_iter(vec![])
);

assert_eq!(
HashSet::<StoreKey>::from_iter(combine_keys(
&key_type_to_keys,
-&vec![DBKeyType::BlockHash, DBKeyType::ShardId]
+&[DBKeyType::BlockHash, DBKeyType::ShardId]
)),
HashSet::<StoreKey>::from_iter(vec![])
);

assert_eq!(
-HashSet::<StoreKey>::from_iter(combine_keys(&key_type_to_keys, &vec![])),
+HashSet::<StoreKey>::from_iter(combine_keys(&key_type_to_keys, &[])),
HashSet::<StoreKey>::from_iter(vec![vec![]])
);
}
8 changes: 2 additions & 6 deletions core/store/src/db/colddb.rs
@@ -233,9 +233,7 @@ mod test {
// State -> rc
// Block -> no rc

-let mut ops = vec![];
-ops.push(set_with_rc(DBCol::State, &[SHARD, HASH].concat()));
-ops.push(set(DBCol::Block, HASH));
+let ops = vec![set_with_rc(DBCol::State, &[SHARD, HASH].concat()), set(DBCol::Block, HASH)];
db.write(DBTransaction { ops }).unwrap();

// Fetch data
@@ -295,9 +293,7 @@ mod test {
// Block -> no rc

// Populate data
-let mut ops = vec![];
-ops.push(set_with_rc(DBCol::State, &[SHARD, HASH].concat()));
-ops.push(set(DBCol::Block, HASH));
+let ops = vec![set_with_rc(DBCol::State, &[SHARD, HASH].concat()), set(DBCol::Block, HASH)];
db.write(DBTransaction { ops }).unwrap();

let mut result = Vec::<String>::new();